Posted to commits@storm.apache.org by pt...@apache.org on 2014/04/21 21:44:00 UTC

[01/50] [abbrv] git commit: updated group id, prepared for tag

Repository: incubator-storm
Updated Branches:
  refs/heads/master 6cc9bfe1c -> c4ea0caee


updated group id, prepared for tag


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/a440ecc0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/a440ecc0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/a440ecc0

Branch: refs/heads/master
Commit: a440ecc05f7bae6b3cea6b6c9d0450a577b42d58
Parents: a4c30eb
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Dec 22 13:57:29 2013 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Dec 22 13:57:29 2013 +0000

----------------------------------------------------------------------
 pom.xml | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/a440ecc0/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index d370aba..430ecef 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2,12 +2,21 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-    <groupId>org.clojars.wurstmeister</groupId>
+    <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.2.0-SNAPSHOT</version>
+    <version>0.2.0</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
+    <licenses>
+        <license>
+            <name>The Apache Software License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+            <comments>A business-friendly OSS license</comments>
+        </license>
+    </licenses>
+    <url>https://github.com/wurstmeister/storm-kafka-0.8-plus</url>
     <scm>
         <connection>scm:git:git://github.com/wurstmeister/storm-kafka-0.8-plus.git</connection>
         <developerConnection>scm:git:ssh://git@github.com/wurstmeister/storm-kafka-0.8-plus.git</developerConnection>


[13/50] [abbrv] git commit: calculate start offset for new topology consistently

Posted by pt...@apache.org.
calculate start offset for new topology consistently


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/f7890915
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/f7890915
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/f7890915

Branch: refs/heads/master
Commit: f789091534be95c103890be6539bbfc5faf69b37
Parents: 5b764cd
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Jan 18 15:51:44 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Jan 18 15:56:19 2014 +0000

----------------------------------------------------------------------
 src/test/storm/kafka/KafkaUtilsTest.java | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/f7890915/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaUtilsTest.java b/src/test/storm/kafka/KafkaUtilsTest.java
index 506789c..20a4221 100644
--- a/src/test/storm/kafka/KafkaUtilsTest.java
+++ b/src/test/storm/kafka/KafkaUtilsTest.java
@@ -70,6 +70,26 @@ public class KafkaUtilsTest {
         sendMessageAndAssertValueForOffset(-99);
     }
 
+    @Test
+    public void getOffsetFromConfigAndDontForceFromStart() {
+        config.forceFromStart = false;
+        config.startOffsetTime = OffsetRequest.EarliestTime();
+        createTopicAndSendMessage();
+        long latestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime());
+        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
+        assertThat(latestOffset, is(equalTo(offsetFromConfig)));
+    }
+
+    @Test
+    public void getOffsetFromConfigAndForceFromStart() {
+        config.forceFromStart = true;
+        config.startOffsetTime = OffsetRequest.EarliestTime();
+        createTopicAndSendMessage();
+        long earliestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
+        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
+        assertThat(earliestOffset, is(equalTo(offsetFromConfig)));
+    }
+
     private String createTopicAndSendMessage() {
         Properties p = new Properties();
         p.setProperty("metadata.broker.list", "localhost:49123");
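
The two tests above pin down the intended semantics: with forceFromStart unset the spout starts at the latest offset regardless of startOffsetTime, and with it set the configured startOffsetTime wins. A minimal sketch of the config-driven getOffset overload the tests exercise, inferred from the assertions rather than copied from the committed KafkaUtils source:

    // Sketch only: behaviour implied by the tests above; the actual
    // storm.kafka.KafkaUtils implementation in this commit is not shown here.
    public static long getOffset(SimpleConsumer consumer, String topic,
                                 int partition, KafkaConfig config) {
        long startOffsetTime = kafka.api.OffsetRequest.LatestTime();
        if (config.forceFromStart) {
            // only honour the configured time when a restart is forced
            startOffsetTime = config.startOffsetTime;
        }
        return getOffset(consumer, topic, partition, startOffsetTime);
    }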


[04/50] [abbrv] git commit: use consistent formatting

Posted by pt...@apache.org.
use consistent formatting


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/e8f54d63
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/e8f54d63
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/e8f54d63

Branch: refs/heads/master
Commit: e8f54d63094806a2a1364adb5e209ab2ca10f0f0
Parents: d35a6ee
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Dec 22 15:58:01 2013 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Dec 22 15:58:01 2013 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/DynamicBrokersReader.java   | 158 +++----
 .../kafka/DynamicPartitionConnections.java      |  34 +-
 src/jvm/storm/kafka/HostPort.java               |  50 +-
 src/jvm/storm/kafka/KafkaConfig.java            |  16 +-
 src/jvm/storm/kafka/KafkaSpout.java             |  45 +-
 src/jvm/storm/kafka/Partition.java              |  50 +-
 src/jvm/storm/kafka/PartitionCoordinator.java   |   1 +
 src/jvm/storm/kafka/PartitionManager.java       | 100 ++--
 src/jvm/storm/kafka/StaticCoordinator.java      |  12 +-
 src/jvm/storm/kafka/StaticHosts.java            |  17 +-
 .../storm/kafka/StaticPartitionConnections.java |   8 +-
 src/jvm/storm/kafka/ZkCoordinator.java          |  48 +-
 src/jvm/storm/kafka/ZkHosts.java                |  22 +-
 src/jvm/storm/kafka/ZkState.java                |  72 +--
 src/jvm/storm/kafka/trident/Coordinator.java    |  44 +-
 .../storm/kafka/trident/DefaultCoordinator.java |   2 +-
 .../trident/GlobalPartitionInformation.java     |  86 ++--
 .../storm/kafka/trident/IBatchCoordinator.java  |   1 +
 src/jvm/storm/kafka/trident/IBrokerReader.java  |   1 +
 src/jvm/storm/kafka/trident/KafkaUtils.java     |  74 +--
 src/jvm/storm/kafka/trident/MaxMetric.java      |   8 +-
 .../kafka/trident/OpaqueTridentKafkaSpout.java  |  14 +-
 .../storm/kafka/trident/StaticBrokerReader.java |   6 +-
 .../trident/TransactionalTridentKafkaSpout.java |   8 +-
 .../storm/kafka/trident/TridentKafkaConfig.java |   6 +-
 .../kafka/trident/TridentKafkaEmitter.java      | 466 ++++++++++---------
 src/jvm/storm/kafka/trident/ZkBrokerReader.java |  12 +-
 .../storm/kafka/DynamicBrokersReaderTest.java   | 258 +++++-----
 28 files changed, 821 insertions(+), 798 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/DynamicBrokersReader.java b/src/jvm/storm/kafka/DynamicBrokersReader.java
index ae15534..c802baf 100644
--- a/src/jvm/storm/kafka/DynamicBrokersReader.java
+++ b/src/jvm/storm/kafka/DynamicBrokersReader.java
@@ -12,117 +12,117 @@ import storm.kafka.trident.GlobalPartitionInformation;
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
-import java.util.*;
+import java.util.List;
+import java.util.Map;
 
 public class DynamicBrokersReader {
 
-	public static final Logger LOG = LoggerFactory.getLogger(DynamicBrokersReader.class);
+    public static final Logger LOG = LoggerFactory.getLogger(DynamicBrokersReader.class);
 
     private CuratorFramework _curator;
     private String _zkPath;
     private String _topic;
-    
+
     public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
-		_zkPath = zkPath;
-		_topic = topic;
-		try {
-			_curator = CuratorFrameworkFactory.newClient(
-				zkStr,
-				Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
-				15000,
-				new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
-				Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
-			_curator.start();
-		} catch (IOException ex)  {
-			LOG.error("can't connect to zookeeper");
-		}
+        _zkPath = zkPath;
+        _topic = topic;
+        try {
+            _curator = CuratorFrameworkFactory.newClient(
+                    zkStr,
+                    Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
+                    15000,
+                    new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
+                            Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
+            _curator.start();
+        } catch (IOException ex) {
+            LOG.error("can't connect to zookeeper");
+        }
     }
-    
+
     /**
-	 * Get all partitions with their current leaders
+     * Get all partitions with their current leaders
      */
     public GlobalPartitionInformation getBrokerInfo() {
-		GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
         try {
-			int numPartitionsForTopic = getNumPartitions();
-			String brokerInfoPath = brokerPath();
-			for (int partition = 0; partition < numPartitionsForTopic; partition++) {
-				int leader = getLeaderFor(partition);
-				String path = brokerInfoPath + "/" + leader;
-				try {
-					byte[] hostPortData = _curator.getData().forPath(path);
-					HostPort hp = getBrokerHost(hostPortData);
-					globalPartitionInformation.addPartition(partition, hp);
-				} catch(org.apache.zookeeper.KeeperException.NoNodeException e) {
-					LOG.error("Node {} does not exist ", path);
-				}
-			}
-        } catch(Exception e) {
+            int numPartitionsForTopic = getNumPartitions();
+            String brokerInfoPath = brokerPath();
+            for (int partition = 0; partition < numPartitionsForTopic; partition++) {
+                int leader = getLeaderFor(partition);
+                String path = brokerInfoPath + "/" + leader;
+                try {
+                    byte[] hostPortData = _curator.getData().forPath(path);
+                    HostPort hp = getBrokerHost(hostPortData);
+                    globalPartitionInformation.addPartition(partition, hp);
+                } catch (org.apache.zookeeper.KeeperException.NoNodeException e) {
+                    LOG.error("Node {} does not exist ", path);
+                }
+            }
+        } catch (Exception e) {
             throw new RuntimeException(e);
         }
-		LOG.info("Read partition info from zookeeper: " + globalPartitionInformation);
+        LOG.info("Read partition info from zookeeper: " + globalPartitionInformation);
         return globalPartitionInformation;
     }
 
 
+    private int getNumPartitions() {
+        try {
+            String topicBrokersPath = partitionPath();
+            List<String> children = _curator.getChildren().forPath(topicBrokersPath);
+            return children.size();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
 
-	private int getNumPartitions() {
-		try {
-			String topicBrokersPath = partitionPath();
-			List<String> children = _curator.getChildren().forPath(topicBrokersPath);
-			return children.size();
-		} catch(Exception e) {
-			throw new RuntimeException(e);
-		}
-	}
-
-	public String partitionPath() {
-		return _zkPath + "/topics/" + _topic + "/partitions";
-	}
+    public String partitionPath() {
+        return _zkPath + "/topics/" + _topic + "/partitions";
+    }
 
-	public String brokerPath() {
-		return _zkPath + "/ids";
-	}
+    public String brokerPath() {
+        return _zkPath + "/ids";
+    }
 
-	/**
-	 * get /brokers/topics/distributedTopic/partitions/1/state
-	 * { "controller_epoch":4, "isr":[ 1, 0 ], "leader":1, "leader_epoch":1, "version":1 }
-	 * @param partition
-	 * @return
-	 */
-	private int getLeaderFor(long partition) {
-		try {
-			String topicBrokersPath = partitionPath();
-			byte[] hostPortData = _curator.getData().forPath(topicBrokersPath + "/" + partition + "/state" );
-			Map<Object, Object> value = (Map<Object,Object>) JSONValue.parse(new String(hostPortData, "UTF-8"));
-			Integer leader = ((Number) value.get("leader")).intValue();
-			return leader;
-		} catch (Exception e) {
-			throw new RuntimeException(e);
-		}
-	}
+    /**
+     * get /brokers/topics/distributedTopic/partitions/1/state
+     * { "controller_epoch":4, "isr":[ 1, 0 ], "leader":1, "leader_epoch":1, "version":1 }
+     *
+     * @param partition
+     * @return
+     */
+    private int getLeaderFor(long partition) {
+        try {
+            String topicBrokersPath = partitionPath();
+            byte[] hostPortData = _curator.getData().forPath(topicBrokersPath + "/" + partition + "/state");
+            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(hostPortData, "UTF-8"));
+            Integer leader = ((Number) value.get("leader")).intValue();
+            return leader;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
 
     public void close() {
         _curator.close();
     }
 
-	/**
-	 *
-	 * [zk: localhost:2181(CONNECTED) 56] get /brokers/ids/0
-	 * { "host":"localhost", "jmx_port":9999, "port":9092, "version":1 }
-	 *
-	 * @param contents
-	 * @return
-	 */
+    /**
+     * [zk: localhost:2181(CONNECTED) 56] get /brokers/ids/0
+     * { "host":"localhost", "jmx_port":9999, "port":9092, "version":1 }
+     *
+     * @param contents
+     * @return
+     */
     private HostPort getBrokerHost(byte[] contents) {
         try {
-			Map<Object, Object> value = (Map<Object,Object>) JSONValue.parse(new String(contents, "UTF-8"));
-			String host = (String) value.get("host");
-			Integer port = ((Long) value.get("port")).intValue();
+            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(contents, "UTF-8"));
+            String host = (String) value.get("host");
+            Integer port = ((Long) value.get("port")).intValue();
             return new HostPort(host, port);
         } catch (UnsupportedEncodingException e) {
             throw new RuntimeException(e);
         }
-    }  
+    }
 
 }
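
For context, the reformatted reader above is typically driven as follows. This is a hedged usage sketch: the ZooKeeper address, path and topic are placeholders, and the conf map only needs the Config.STORM_ZOOKEEPER_* keys the constructor reads.

    import java.util.HashMap;
    import java.util.Map;

    import backtype.storm.Config;
    import storm.kafka.DynamicBrokersReader;
    import storm.kafka.trident.GlobalPartitionInformation;

    public class BrokerInfoSketch {
        public static void main(String[] args) {
            Map conf = new HashMap();
            conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 100);

            // "/brokers" matches the DEFAULT_ZK_PATH that ZkHosts uses further down.
            DynamicBrokersReader reader = new DynamicBrokersReader(
                    conf, "localhost:2181", "/brokers", "testTopic");
            GlobalPartitionInformation partitions = reader.getBrokerInfo();
            System.out.println(partitions);
            reader.close();
        }
    }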

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/DynamicPartitionConnections.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/DynamicPartitionConnections.java b/src/jvm/storm/kafka/DynamicPartitionConnections.java
index 409ec37..7a799a0 100644
--- a/src/jvm/storm/kafka/DynamicPartitionConnections.java
+++ b/src/jvm/storm/kafka/DynamicPartitionConnections.java
@@ -13,33 +13,33 @@ import java.util.Set;
 
 public class DynamicPartitionConnections {
 
-	public static final Logger LOG = LoggerFactory.getLogger(DynamicPartitionConnections.class);
+    public static final Logger LOG = LoggerFactory.getLogger(DynamicPartitionConnections.class);
 
     static class ConnectionInfo {
         SimpleConsumer consumer;
         Set<Integer> partitions = new HashSet();
-        
+
         public ConnectionInfo(SimpleConsumer consumer) {
             this.consumer = consumer;
         }
     }
-    
+
     Map<HostPort, ConnectionInfo> _connections = new HashMap();
     KafkaConfig _config;
-	IBrokerReader _reader;
-    
+    IBrokerReader _reader;
+
     public DynamicPartitionConnections(KafkaConfig config, IBrokerReader brokerReader) {
         _config = config;
-		_reader = brokerReader;
+        _reader = brokerReader;
     }
-    
+
     public SimpleConsumer register(Partition partition) {
-		HostPort hostPort = _reader.getCurrentBrokers().getHostFor(partition.partition);
-		return register(hostPort, partition.partition);
+        HostPort hostPort = _reader.getCurrentBrokers().getHostFor(partition.partition);
+        return register(hostPort, partition.partition);
     }
-    
+
     public SimpleConsumer register(HostPort host, int partition) {
-        if(!_connections.containsKey(host)) {
+        if (!_connections.containsKey(host)) {
             _connections.put(host, new ConnectionInfo(new SimpleConsumer(host.host, host.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId)));
         }
         ConnectionInfo info = _connections.get(host);
@@ -49,14 +49,16 @@ public class DynamicPartitionConnections {
 
     public SimpleConsumer getConnection(Partition partition) {
         ConnectionInfo info = _connections.get(partition.host);
-        if(info != null) return info.consumer;
+        if (info != null) {
+            return info.consumer;
+        }
         return null;
     }
-    
+
     public void unregister(HostPort port, int partition) {
         ConnectionInfo info = _connections.get(port);
         info.partitions.remove(partition);
-        if(info.partitions.isEmpty()) {
+        if (info.partitions.isEmpty()) {
             info.consumer.close();
             _connections.remove(port);
         }
@@ -65,9 +67,9 @@ public class DynamicPartitionConnections {
     public void unregister(Partition partition) {
         unregister(partition.host, partition.partition);
     }
-    
+
     public void clear() {
-        for(ConnectionInfo info: _connections.values()) {
+        for (ConnectionInfo info : _connections.values()) {
             info.consumer.close();
         }
         _connections.clear();

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/HostPort.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/HostPort.java b/src/jvm/storm/kafka/HostPort.java
index afb5da5..5369858 100644
--- a/src/jvm/storm/kafka/HostPort.java
+++ b/src/jvm/storm/kafka/HostPort.java
@@ -1,18 +1,16 @@
 package storm.kafka;
 
 import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
 
 public class HostPort implements Serializable, Comparable<HostPort> {
     public String host;
     public int port;
-    
+
     public HostPort(String host, int port) {
         this.host = host;
         this.port = port;
     }
-    
+
     public HostPort(String host) {
         this(host, 9092);
     }
@@ -33,26 +31,26 @@ public class HostPort implements Serializable, Comparable<HostPort> {
         return host + ":" + port;
     }
 
-	public static HostPort fromString(String host) {
-		HostPort hp;
-		String[] spec = host.split(":");
-		if (spec.length == 1) {
-			hp = new HostPort(spec[0]);
-		} else if (spec.length == 2) {
-			hp = new HostPort(spec[0], Integer.parseInt(spec[1]));
-		} else {
-			throw new IllegalArgumentException("Invalid host specification: " + host);
-		}
-		return hp;
-	}
-
-
-	@Override
-	public int compareTo(HostPort o) {
-		if ( this.host.equals(o.host)) {
-			return this.port - o.port;
-		} else {
-			return this.host.compareTo(o.host);
-		}
-	}
+    public static HostPort fromString(String host) {
+        HostPort hp;
+        String[] spec = host.split(":");
+        if (spec.length == 1) {
+            hp = new HostPort(spec[0]);
+        } else if (spec.length == 2) {
+            hp = new HostPort(spec[0], Integer.parseInt(spec[1]));
+        } else {
+            throw new IllegalArgumentException("Invalid host specification: " + host);
+        }
+        return hp;
+    }
+
+
+    @Override
+    public int compareTo(HostPort o) {
+        if (this.host.equals(o.host)) {
+            return this.port - o.port;
+        } else {
+            return this.host.compareTo(o.host);
+        }
+    }
 }
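
fromString accepts either a bare host or host:port; the single-argument constructor above fills in the Kafka default port 9092. For example (host names are placeholders):

    HostPort a = HostPort.fromString("kafka1.example.com");       // port defaults to 9092
    HostPort b = HostPort.fromString("kafka2.example.com:9093");  // explicit port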

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/KafkaConfig.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaConfig.java b/src/jvm/storm/kafka/KafkaConfig.java
index 457eeb4..e241978 100644
--- a/src/jvm/storm/kafka/KafkaConfig.java
+++ b/src/jvm/storm/kafka/KafkaConfig.java
@@ -9,11 +9,11 @@ public class KafkaConfig implements Serializable {
 
     public final BrokerHosts hosts;
     public final String topic;
-	public final String clientId;
+    public final String clientId;
 
-    public int fetchSizeBytes = 1024*1024;
+    public int fetchSizeBytes = 1024 * 1024;
     public int socketTimeoutMs = 10000;
-    public int bufferSizeBytes = 1024*1024;
+    public int bufferSizeBytes = 1024 * 1024;
     public MultiScheme scheme = new RawMultiScheme();
     public boolean forceFromStart = false;
     public long startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
@@ -22,11 +22,11 @@ public class KafkaConfig implements Serializable {
         this(hosts, topic, kafka.api.OffsetRequest.DefaultClientId());
     }
 
-	public KafkaConfig(BrokerHosts hosts, String topic, String clientId) {
-		this.hosts = hosts;
-		this.topic = topic;
-		this.clientId = clientId;
-	}
+    public KafkaConfig(BrokerHosts hosts, String topic, String clientId) {
+        this.hosts = hosts;
+        this.topic = topic;
+        this.clientId = clientId;
+    }
 
     public void forceStartOffsetTime(long millis) {
         startOffsetTime = millis;
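
A minimal configuration sketch using the constructors and defaults above (the ZooKeeper address and topic are placeholders; ZkHosts is the BrokerHosts implementation reformatted further down in this commit):

    // The two-arg constructor falls back to kafka.api.OffsetRequest.DefaultClientId().
    KafkaConfig config = new KafkaConfig(new ZkHosts("localhost:2181"), "testTopic");
    config.fetchSizeBytes = 2 * 1024 * 1024;  // override the 1 MB default shown above
    config.forceFromStart = true;             // replay from startOffsetTime on redeploy
    config.forceStartOffsetTime(kafka.api.OffsetRequest.EarliestTime());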

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/KafkaSpout.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaSpout.java b/src/jvm/storm/kafka/KafkaSpout.java
index 781e6ce..cf407ad 100644
--- a/src/jvm/storm/kafka/KafkaSpout.java
+++ b/src/jvm/storm/kafka/KafkaSpout.java
@@ -54,21 +54,25 @@ public class KafkaSpout extends BaseRichSpout {
     public void open(Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
         _collector = collector;
 
-	Map stateConf = new HashMap(conf);
+        Map stateConf = new HashMap(conf);
         List<String> zkServers = _spoutConfig.zkServers;
-        if(zkServers==null) zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
+        if (zkServers == null) {
+            zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
+        }
         Integer zkPort = _spoutConfig.zkPort;
-        if(zkPort==null) zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
+        if (zkPort == null) {
+            zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
+        }
         stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, zkServers);
         stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, zkPort);
         stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, _spoutConfig.zkRoot);
-	    _state = new ZkState(stateConf);
+        _state = new ZkState(stateConf);
 
         _connections = new DynamicPartitionConnections(_spoutConfig, KafkaUtils.makeBrokerReader(conf, _spoutConfig));
 
         // using TransactionalState like this is a hack
         int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
-        if(_spoutConfig.hosts instanceof StaticHosts) {
+        if (_spoutConfig.hosts instanceof StaticHosts) {
             _coordinator = new StaticCoordinator(_connections, conf, _spoutConfig, _state, context.getThisTaskIndex(), totalTasks, _uuid);
         } else {
             _coordinator = new ZkCoordinator(_connections, conf, _spoutConfig, _state, context.getThisTaskIndex(), totalTasks, _uuid);
@@ -76,13 +80,16 @@ public class KafkaSpout extends BaseRichSpout {
 
         context.registerMetric("kafkaOffset", new IMetric() {
             KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_spoutConfig.topic, _connections);
+
             @Override
             public Object getValueAndReset() {
                 List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
                 Set<Partition> latestPartitions = new HashSet();
-                for(PartitionManager pm : pms) { latestPartitions.add(pm.getPartition()); }
+                for (PartitionManager pm : pms) {
+                    latestPartitions.add(pm.getPartition());
+                }
                 _kafkaOffsetMetric.refreshPartitions(latestPartitions);
-                for(PartitionManager pm : pms) {
+                for (PartitionManager pm : pms) {
                     _kafkaOffsetMetric.setLatestEmittedOffset(pm.getPartition(), pm.lastCompletedOffset());
                 }
                 return _kafkaOffsetMetric.getValueAndReset();
@@ -94,7 +101,7 @@ public class KafkaSpout extends BaseRichSpout {
             public Object getValueAndReset() {
                 List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
                 Map concatMetricsDataMaps = new HashMap();
-                for(PartitionManager pm : pms) {
+                for (PartitionManager pm : pms) {
                     concatMetricsDataMaps.putAll(pm.getMetricsDataMap());
                 }
                 return concatMetricsDataMaps;
@@ -104,27 +111,27 @@ public class KafkaSpout extends BaseRichSpout {
 
     @Override
     public void close() {
-	_state.close();
+        _state.close();
     }
 
     @Override
     public void nextTuple() {
         List<PartitionManager> managers = _coordinator.getMyManagedPartitions();
-        for(int i=0; i<managers.size(); i++) {
-            
+        for (int i = 0; i < managers.size(); i++) {
+
             // in case the number of managers decreased
             _currPartitionIndex = _currPartitionIndex % managers.size();
             EmitState state = managers.get(_currPartitionIndex).next(_collector);
-            if(state!=EmitState.EMITTED_MORE_LEFT) {
+            if (state != EmitState.EMITTED_MORE_LEFT) {
                 _currPartitionIndex = (_currPartitionIndex + 1) % managers.size();
             }
-            if(state!=EmitState.NO_EMITTED) {
+            if (state != EmitState.NO_EMITTED) {
                 break;
             }
         }
 
         long now = System.currentTimeMillis();
-        if((now - _lastUpdateMs) > _spoutConfig.stateUpdateIntervalMs) {
+        if ((now - _lastUpdateMs) > _spoutConfig.stateUpdateIntervalMs) {
             commit();
         }
     }
@@ -133,18 +140,18 @@ public class KafkaSpout extends BaseRichSpout {
     public void ack(Object msgId) {
         KafkaMessageId id = (KafkaMessageId) msgId;
         PartitionManager m = _coordinator.getManager(id.partition);
-        if(m!=null) {
+        if (m != null) {
             m.ack(id.offset);
-        }                
+        }
     }
 
     @Override
     public void fail(Object msgId) {
         KafkaMessageId id = (KafkaMessageId) msgId;
         PartitionManager m = _coordinator.getManager(id.partition);
-        if(m!=null) {
+        if (m != null) {
             m.fail(id.offset);
-        } 
+        }
     }
 
     @Override
@@ -159,7 +166,7 @@ public class KafkaSpout extends BaseRichSpout {
 
     private void commit() {
         _lastUpdateMs = System.currentTimeMillis();
-        for(PartitionManager manager: _coordinator.getMyManagedPartitions()) {
+        for (PartitionManager manager : _coordinator.getMyManagedPartitions()) {
             manager.commit();
         }
     }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/Partition.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/Partition.java b/src/jvm/storm/kafka/Partition.java
index 6de0f6a..87ab7b8 100644
--- a/src/jvm/storm/kafka/Partition.java
+++ b/src/jvm/storm/kafka/Partition.java
@@ -6,7 +6,7 @@ import storm.trident.spout.ISpoutPartition;
 
 public class Partition implements ISpoutPartition {
 
-	public final HostPort host;
+    public final HostPort host;
     public final int partition;
 
     public Partition(HostPort host, int partition) {
@@ -14,30 +14,30 @@ public class Partition implements ISpoutPartition {
         this.partition = partition;
     }
 
-	@Override
-	public int hashCode() {
-		return Objects.hashCode(host, partition);
-	}
-
-	@Override
-	public boolean equals(Object obj) {
-		if (this == obj) {
-			return true;
-		}
-		if (obj == null || getClass() != obj.getClass()) {
-			return false;
-		}
-		final Partition other = (Partition) obj;
-		return Objects.equal(this.host, other.host) && Objects.equal(this.partition, other.partition);
-	}
-
-	@Override
-	public String toString() {
-		return "Partition{" +
-				"host=" + host +
-				", partition=" + partition +
-				'}';
-	}
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(host, partition);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        final Partition other = (Partition) obj;
+        return Objects.equal(this.host, other.host) && Objects.equal(this.partition, other.partition);
+    }
+
+    @Override
+    public String toString() {
+        return "Partition{" +
+                "host=" + host +
+                ", partition=" + partition +
+                '}';
+    }
 
     @Override
     public String getId() {

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/PartitionCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionCoordinator.java b/src/jvm/storm/kafka/PartitionCoordinator.java
index 2ee2009..d28248d 100644
--- a/src/jvm/storm/kafka/PartitionCoordinator.java
+++ b/src/jvm/storm/kafka/PartitionCoordinator.java
@@ -4,5 +4,6 @@ import java.util.List;
 
 public interface PartitionCoordinator {
     List<PartitionManager> getMyManagedPartitions();
+
     PartitionManager getManager(Partition partition);
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionManager.java b/src/jvm/storm/kafka/PartitionManager.java
index 6596f96..623bc10 100644
--- a/src/jvm/storm/kafka/PartitionManager.java
+++ b/src/jvm/storm/kafka/PartitionManager.java
@@ -58,31 +58,30 @@ public class PartitionManager {
         _spoutConfig = spoutConfig;
         _topologyInstanceId = topologyInstanceId;
         _consumer = connections.register(id.host, id.partition);
-		_state = state;
+        _state = state;
         _stormConf = stormConf;
 
         String jsonTopologyId = null;
         Long jsonOffset = null;
         try {
             Map<Object, Object> json = _state.readJSON(committedPath());
-            if(json != null) {
-                jsonTopologyId = (String)((Map<Object,Object>)json.get("topology")).get("id");
-                jsonOffset = (Long)json.get("offset");
+            if (json != null) {
+                jsonTopologyId = (String) ((Map<Object, Object>) json.get("topology")).get("id");
+                jsonOffset = (Long) json.get("offset");
             }
-        }
-        catch(Throwable e) {
+        } catch (Throwable e) {
             LOG.warn("Error reading and/or parsing at ZkNode: " + committedPath(), e);
         }
 
-        if(!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.forceFromStart) {
+        if (!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.forceFromStart) {
             _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition, spoutConfig.startOffsetTime);
-	    LOG.info("Using startOffsetTime to choose last commit offset.");
-        } else if(jsonTopologyId == null || jsonOffset == null) { // failed to parse JSON?
-            _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition,  kafka.api.OffsetRequest.LatestTime());
-	    LOG.info("Setting last commit offset to HEAD.");
+            LOG.info("Using startOffsetTime to choose last commit offset.");
+        } else if (jsonTopologyId == null || jsonOffset == null) { // failed to parse JSON?
+            _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition, kafka.api.OffsetRequest.LatestTime());
+            LOG.info("Setting last commit offset to HEAD.");
         } else {
             _committedTo = jsonOffset;
-	    LOG.info("Read last commit offset from zookeeper: " + _committedTo);
+            LOG.info("Read last commit offset from zookeeper: " + _committedTo);
         }
 
         LOG.info("Starting Kafka " + _consumer.host() + ":" + id.partition + " from offset " + _committedTo);
@@ -105,22 +104,25 @@ public class PartitionManager {
 
     //returns false if it's reached the end of current batch
     public EmitState next(SpoutOutputCollector collector) {
-        if(_waitingToEmit.isEmpty()) fill();
-        while(true) {
+        if (_waitingToEmit.isEmpty()) {
+            fill();
+        }
+        while (true) {
             MessageAndRealOffset toEmit = _waitingToEmit.pollFirst();
-            if(toEmit==null) {
+            if (toEmit == null) {
                 return EmitState.NO_EMITTED;
             }
             Iterable<List<Object>> tups = _spoutConfig.scheme.deserialize(Utils.toByteArray(toEmit.msg.payload()));
-            if(tups!=null) {
-                for(List<Object> tup: tups)
+            if (tups != null) {
+                for (List<Object> tup : tups) {
                     collector.emit(tup, new KafkaMessageId(_partition, toEmit.offset));
+                }
                 break;
             } else {
                 ack(toEmit.offset);
             }
         }
-        if(!_waitingToEmit.isEmpty()) {
+        if (!_waitingToEmit.isEmpty()) {
             return EmitState.EMITTED_MORE_LEFT;
         } else {
             return EmitState.EMITTED_END;
@@ -132,11 +134,11 @@ public class PartitionManager {
         long start = System.nanoTime();
         ByteBufferMessageSet msgs = _consumer.fetch(
                 new FetchRequestBuilder().addFetch(
-                    _spoutConfig.topic,
-                    _partition.partition,
-                    _emittedToOffset,
-                    _spoutConfig.fetchSizeBytes).build()).messageSet(_spoutConfig.topic,
-				_partition.partition);
+                        _spoutConfig.topic,
+                        _partition.partition,
+                        _emittedToOffset,
+                        _spoutConfig.fetchSizeBytes).build()).messageSet(_spoutConfig.topic,
+                _partition.partition);
         long end = System.nanoTime();
         long millis = (end - start) / 1000000;
         _fetchAPILatencyMax.update(millis);
@@ -145,26 +147,26 @@ public class PartitionManager {
         int numMessages = countMessages(msgs);
         _fetchAPIMessageCount.incrBy(numMessages);
 
-        if(numMessages>0) {
-          LOG.info("Fetched " + numMessages + " messages from Kafka: " + _consumer.host() + ":" + _partition.partition);
+        if (numMessages > 0) {
+            LOG.info("Fetched " + numMessages + " messages from Kafka: " + _consumer.host() + ":" + _partition.partition);
         }
-        for(MessageAndOffset msg: msgs) {
+        for (MessageAndOffset msg : msgs) {
             _pending.add(_emittedToOffset);
             _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
             _emittedToOffset = msg.nextOffset();
         }
-        if(numMessages>0) {
-          LOG.info("Added " + numMessages + " messages from Kafka: " + _consumer.host() + ":" + _partition.partition + " to internal buffers");
+        if (numMessages > 0) {
+            LOG.info("Added " + numMessages + " messages from Kafka: " + _consumer.host() + ":" + _partition.partition + " to internal buffers");
         }
     }
 
-	private int countMessages(ByteBufferMessageSet messageSet) {
-		int counter = 0;
-		for (MessageAndOffset messageAndOffset : messageSet) {
-			counter = counter + 1;
-		}
-		return counter;
-	}
+    private int countMessages(ByteBufferMessageSet messageSet) {
+        int counter = 0;
+        for (MessageAndOffset messageAndOffset : messageSet) {
+            counter = counter + 1;
+        }
+        return counter;
+    }
 
     public void ack(Long offset) {
         _pending.remove(offset);
@@ -173,7 +175,7 @@ public class PartitionManager {
     public void fail(Long offset) {
         //TODO: should it use in-memory ack set to skip anything that's been acked but not committed???
         // things might get crazy with lots of timeouts
-        if(_emittedToOffset > offset) {
+        if (_emittedToOffset > offset) {
             _emittedToOffset = offset;
             _pending.tailSet(offset).clear();
         }
@@ -182,23 +184,23 @@ public class PartitionManager {
     public void commit() {
         LOG.info("Committing offset for " + _partition);
         long committedTo;
-        if(_pending.isEmpty()) {
+        if (_pending.isEmpty()) {
             committedTo = _emittedToOffset;
         } else {
             committedTo = _pending.first();
         }
-        if(committedTo!=_committedTo) {
+        if (committedTo != _committedTo) {
             LOG.info("Writing committed offset to ZK: " + committedTo);
 
-            Map<Object, Object> data = (Map<Object,Object>)ImmutableMap.builder()
-                .put("topology", ImmutableMap.of("id", _topologyInstanceId,
-						"name", _stormConf.get(Config.TOPOLOGY_NAME)))
-                .put("offset", committedTo)
-                .put("partition", _partition.partition)
-                .put("broker", ImmutableMap.of("host", _partition.host.host,
-						"port", _partition.host.port))
-                .put("topic", _spoutConfig.topic).build();
-	    _state.writeJSON(committedPath(), data);
+            Map<Object, Object> data = (Map<Object, Object>) ImmutableMap.builder()
+                    .put("topology", ImmutableMap.of("id", _topologyInstanceId,
+                            "name", _stormConf.get(Config.TOPOLOGY_NAME)))
+                    .put("offset", committedTo)
+                    .put("partition", _partition.partition)
+                    .put("broker", ImmutableMap.of("host", _partition.host.host,
+                            "port", _partition.host.port))
+                    .put("topic", _spoutConfig.topic).build();
+            _state.writeJSON(committedPath(), data);
 
             LOG.info("Wrote committed offset to ZK: " + committedTo);
             _committedTo = committedTo;
@@ -212,7 +214,7 @@ public class PartitionManager {
 
     public long queryPartitionOffsetLatestTime() {
         return KafkaUtils.getOffset(_consumer, _spoutConfig.topic, _partition.partition,
-				OffsetRequest.LatestTime());
+                OffsetRequest.LatestTime());
     }
 
     public long lastCommittedOffset() {
@@ -220,7 +222,7 @@ public class PartitionManager {
     }
 
     public long lastCompletedOffset() {
-        if(_pending.isEmpty()) {
+        if (_pending.isEmpty()) {
             return _emittedToOffset;
         } else {
             return _pending.first();

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/StaticCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/StaticCoordinator.java b/src/jvm/storm/kafka/StaticCoordinator.java
index 6f97c8b..7415522 100644
--- a/src/jvm/storm/kafka/StaticCoordinator.java
+++ b/src/jvm/storm/kafka/StaticCoordinator.java
@@ -13,22 +13,22 @@ public class StaticCoordinator implements PartitionCoordinator {
     public StaticCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig config, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
         StaticHosts hosts = (StaticHosts) config.hosts;
         List<Partition> partitions = hosts.getPartitionInformation().getOrderedPartitions();
-        for(int i=taskIndex; i<partitions.size(); i+=totalTasks) {
+        for (int i = taskIndex; i < partitions.size(); i += totalTasks) {
             Partition myPartition = partitions.get(i);
             _managers.put(myPartition, new PartitionManager(connections, topologyInstanceId, state, stormConf, config, myPartition));
-            
+
         }
-        
+
         _allManagers = new ArrayList(_managers.values());
     }
-    
+
     @Override
     public List<PartitionManager> getMyManagedPartitions() {
         return _allManagers;
     }
-    
+
     public PartitionManager getManager(Partition partition) {
         return _managers.get(partition);
     }
-    
+
 }
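
The constructor loop above stripes partitions across spout tasks round-robin. A standalone illustration of the striping: with 6 partitions and 2 tasks, task 0 manages partitions 0, 2, 4 and task 1 manages 1, 3, 5.

    // Demo of the StaticCoordinator assignment loop (values are arbitrary).
    int totalTasks = 2, numPartitions = 6;
    for (int taskIndex = 0; taskIndex < totalTasks; taskIndex++) {
        StringBuilder mine = new StringBuilder();
        for (int i = taskIndex; i < numPartitions; i += totalTasks) {
            mine.append(i).append(' ');
        }
        System.out.println("task " + taskIndex + " -> partitions " + mine);
    }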

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/StaticHosts.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/StaticHosts.java b/src/jvm/storm/kafka/StaticHosts.java
index 6ed828d..9ed7193 100644
--- a/src/jvm/storm/kafka/StaticHosts.java
+++ b/src/jvm/storm/kafka/StaticHosts.java
@@ -2,9 +2,6 @@ package storm.kafka;
 
 import storm.kafka.trident.GlobalPartitionInformation;
 
-import java.util.ArrayList;
-import java.util.List;
-
 /**
  * Date: 11/05/2013
  * Time: 14:43
@@ -12,13 +9,13 @@ import java.util.List;
 public class StaticHosts implements BrokerHosts {
 
 
-	private GlobalPartitionInformation partitionInformation;
+    private GlobalPartitionInformation partitionInformation;
 
-	public StaticHosts(GlobalPartitionInformation partitionInformation) {
-		this.partitionInformation = partitionInformation;
-	}
+    public StaticHosts(GlobalPartitionInformation partitionInformation) {
+        this.partitionInformation = partitionInformation;
+    }
 
-	public GlobalPartitionInformation getPartitionInformation() {
-		return partitionInformation;
-	}
+    public GlobalPartitionInformation getPartitionInformation() {
+        return partitionInformation;
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/StaticPartitionConnections.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/StaticPartitionConnections.java b/src/jvm/storm/kafka/StaticPartitionConnections.java
index 2d40c8b..4294362 100644
--- a/src/jvm/storm/kafka/StaticPartitionConnections.java
+++ b/src/jvm/storm/kafka/StaticPartitionConnections.java
@@ -9,17 +9,17 @@ public class StaticPartitionConnections {
     Map<Integer, SimpleConsumer> _kafka = new HashMap<Integer, SimpleConsumer>();
     KafkaConfig _config;
     StaticHosts hosts;
-    
+
     public StaticPartitionConnections(KafkaConfig conf) {
         _config = conf;
-        if(!(conf.hosts instanceof StaticHosts)) {
+        if (!(conf.hosts instanceof StaticHosts)) {
             throw new RuntimeException("Must configure with static hosts");
         }
         this.hosts = (StaticHosts) conf.hosts;
     }
 
     public SimpleConsumer getConsumer(int partition) {
-		if(!_kafka.containsKey(partition)) {
+        if (!_kafka.containsKey(partition)) {
             HostPort hp = hosts.getPartitionInformation().getHostFor(partition);
             _kafka.put(partition, new SimpleConsumer(hp.host, hp.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId));
 
@@ -28,7 +28,7 @@ public class StaticPartitionConnections {
     }
 
     public void close() {
-        for(SimpleConsumer consumer: _kafka.values()) {
+        for (SimpleConsumer consumer : _kafka.values()) {
             consumer.close();
         }
     }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/ZkCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/ZkCoordinator.java b/src/jvm/storm/kafka/ZkCoordinator.java
index d457bdd..98e51a3 100644
--- a/src/jvm/storm/kafka/ZkCoordinator.java
+++ b/src/jvm/storm/kafka/ZkCoordinator.java
@@ -9,7 +9,7 @@ import java.util.*;
 
 public class ZkCoordinator implements PartitionCoordinator {
     public static final Logger LOG = LoggerFactory.getLogger(ZkCoordinator.class);
-    
+
     SpoutConfig _spoutConfig;
     int _taskIndex;
     int _totalTasks;
@@ -23,7 +23,7 @@ public class ZkCoordinator implements PartitionCoordinator {
     ZkState _state;
     Map _stormConf;
     IMetricsContext _metricsContext;
-    
+
     public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
         _spoutConfig = spoutConfig;
         _connections = connections;
@@ -31,55 +31,55 @@ public class ZkCoordinator implements PartitionCoordinator {
         _totalTasks = totalTasks;
         _topologyInstanceId = topologyInstanceId;
         _stormConf = stormConf;
-		_state = state;
+        _state = state;
 
         ZkHosts brokerConf = (ZkHosts) spoutConfig.hosts;
         _refreshFreqMs = brokerConf.refreshFreqSecs * 1000;
         _reader = new DynamicBrokersReader(stormConf, brokerConf.brokerZkStr, brokerConf.brokerZkPath, spoutConfig.topic);
-        
+
     }
-    
+
     @Override
     public List<PartitionManager> getMyManagedPartitions() {
-        if(_lastRefreshTime==null || (System.currentTimeMillis() - _lastRefreshTime) > _refreshFreqMs) {
+        if (_lastRefreshTime == null || (System.currentTimeMillis() - _lastRefreshTime) > _refreshFreqMs) {
             refresh();
             _lastRefreshTime = System.currentTimeMillis();
         }
         return _cachedList;
     }
-    
+
     void refresh() {
         try {
             LOG.info("Refreshing partition manager connections");
-			GlobalPartitionInformation brokerInfo = _reader.getBrokerInfo();
+            GlobalPartitionInformation brokerInfo = _reader.getBrokerInfo();
             Set<Partition> mine = new HashSet();
-			for (Partition partitionId: brokerInfo){
-				if(myOwnership(partitionId)) {
-					mine.add(partitionId);
-				}
-			}
+            for (Partition partitionId : brokerInfo) {
+                if (myOwnership(partitionId)) {
+                    mine.add(partitionId);
+                }
+            }
 
             Set<Partition> curr = _managers.keySet();
             Set<Partition> newPartitions = new HashSet<Partition>(mine);
             newPartitions.removeAll(curr);
-            
+
             Set<Partition> deletedPartitions = new HashSet<Partition>(curr);
             deletedPartitions.removeAll(mine);
-            
+
             LOG.info("Deleted partition managers: " + deletedPartitions.toString());
-            
-            for(Partition id: deletedPartitions) {
+
+            for (Partition id : deletedPartitions) {
                 PartitionManager man = _managers.remove(id);
                 man.close();
             }
             LOG.info("New partition managers: " + newPartitions.toString());
-            
-            for(Partition id: newPartitions) {
+
+            for (Partition id : newPartitions) {
                 PartitionManager man = new PartitionManager(_connections, _topologyInstanceId, _state, _stormConf, _spoutConfig, id);
                 _managers.put(id, man);
             }
-            
-        } catch(Exception e) {
+
+        } catch (Exception e) {
             throw new RuntimeException(e);
         }
         _cachedList = new ArrayList<PartitionManager>(_managers.values());
@@ -90,9 +90,9 @@ public class ZkCoordinator implements PartitionCoordinator {
     public PartitionManager getManager(Partition partition) {
         return _managers.get(partition);
     }
-    
+
     private boolean myOwnership(Partition id) {
-        int val = Math.abs(id.host.hashCode() + 23 * id.partition);        
+        int val = Math.abs(id.host.hashCode() + 23 * id.partition);
         return val % _totalTasks == _taskIndex;
-    } 
+    }
 }
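
myOwnership above hashes the partition's broker and id so that each partition is claimed by exactly one task. A hypothetical illustration of the rule, using a String hash as a stand-in for HostPort.hashCode (which this diff does not show):

    // Hypothetical: "kafka1.example.com:9092" stands in for id.host.hashCode().
    int totalTasks = 4;
    for (int partition = 0; partition < 8; partition++) {
        int val = Math.abs("kafka1.example.com:9092".hashCode() + 23 * partition);
        System.out.println("partition " + partition + " -> task " + (val % totalTasks));
    }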

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/ZkHosts.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/ZkHosts.java b/src/jvm/storm/kafka/ZkHosts.java
index dfe3b4c..f2e0fc2 100644
--- a/src/jvm/storm/kafka/ZkHosts.java
+++ b/src/jvm/storm/kafka/ZkHosts.java
@@ -5,18 +5,18 @@ package storm.kafka;
  * Time: 14:38
  */
 public class ZkHosts implements BrokerHosts {
-	private static final String DEFAULT_ZK_PATH = "/brokers";
+    private static final String DEFAULT_ZK_PATH = "/brokers";
 
-	public String brokerZkStr = null;
-	public String brokerZkPath = null; // e.g., /kafka/brokers
-	public int refreshFreqSecs = 60;
+    public String brokerZkStr = null;
+    public String brokerZkPath = null; // e.g., /kafka/brokers
+    public int refreshFreqSecs = 60;
 
-	public ZkHosts(String brokerZkStr, String brokerZkPath) {
-		this.brokerZkStr = brokerZkStr;
-		this.brokerZkPath = brokerZkPath;
-	}
+    public ZkHosts(String brokerZkStr, String brokerZkPath) {
+        this.brokerZkStr = brokerZkStr;
+        this.brokerZkPath = brokerZkPath;
+    }
 
-	public ZkHosts(String brokerZkStr) {
-		this(brokerZkStr, DEFAULT_ZK_PATH);
-	}
+    public ZkHosts(String brokerZkStr) {
+        this(brokerZkStr, DEFAULT_ZK_PATH);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/ZkState.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/ZkState.java b/src/jvm/storm/kafka/ZkState.java
index 17ebb25..d5416af 100644
--- a/src/jvm/storm/kafka/ZkState.java
+++ b/src/jvm/storm/kafka/ZkState.java
@@ -20,78 +20,80 @@ public class ZkState {
     CuratorFramework _curator;
 
     private CuratorFramework newCurator(Map stateConf) throws Exception {
-        Integer port = (Integer)stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
-	String serverPorts = "";
-        for(String server: (List<String>)stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
+        Integer port = (Integer) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
+        String serverPorts = "";
+        for (String server : (List<String>) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
             serverPorts = serverPorts + server + ":" + port + ",";
         }
-	return CuratorFrameworkFactory.newClient(serverPorts,
-		Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)), 
-		15000, 
-		new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
-				Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
+        return CuratorFrameworkFactory.newClient(serverPorts,
+                Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
+                15000,
+                new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
+                        Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
     }
 
     public CuratorFramework getCurator() {
-	assert _curator != null;
+        assert _curator != null;
         return _curator;
     }
 
     public ZkState(Map stateConf) {
-	stateConf = new HashMap(stateConf);
+        stateConf = new HashMap(stateConf);
 
-	try {
-	    _curator = newCurator(stateConf);
-	    _curator.start();
-	} catch(Exception e) {
-	    throw new RuntimeException(e);
-	}
+        try {
+            _curator = newCurator(stateConf);
+            _curator.start();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
     }
 
-    public void writeJSON(String path, Map<Object,Object> data) {
-	LOG.info("Writing " + path + " the data " + data.toString());
+    public void writeJSON(String path, Map<Object, Object> data) {
+        LOG.info("Writing " + path + " the data " + data.toString());
         writeBytes(path, JSONValue.toJSONString(data).getBytes(Charset.forName("UTF-8")));
     }
 
     public void writeBytes(String path, byte[] bytes) {
         try {
-            if(_curator.checkExists().forPath(path)==null) {
+            if (_curator.checkExists().forPath(path) == null) {
                 _curator.create()
                         .creatingParentsIfNeeded()
                         .withMode(CreateMode.PERSISTENT)
                         .forPath(path, bytes);
             } else {
-		_curator.setData().forPath(path, bytes);
-	    }
-        } catch(Exception e) {
+                _curator.setData().forPath(path, bytes);
+            }
+        } catch (Exception e) {
             throw new RuntimeException(e);
         }
     }
 
-    public Map<Object,Object> readJSON(String path) {
-	try {
-	    byte[] b = readBytes(path);
-	    if(b==null) return null;
-	    return (Map<Object,Object>)JSONValue.parse(new String(b, "UTF-8"));
-	} catch(Exception e) {
-	    throw new RuntimeException(e);
-	}
+    public Map<Object, Object> readJSON(String path) {
+        try {
+            byte[] b = readBytes(path);
+            if (b == null) {
+                return null;
+            }
+            return (Map<Object, Object>) JSONValue.parse(new String(b, "UTF-8"));
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
     }
 
     public byte[] readBytes(String path) {
         try {
-            if(_curator.checkExists().forPath(path)!=null) {
-		return _curator.getData().forPath(path);
+            if (_curator.checkExists().forPath(path) != null) {
+                return _curator.getData().forPath(path);
             } else {
                 return null;
             }
-        } catch(Exception e) {
+        } catch (Exception e) {
             throw new RuntimeException(e);
         }
     }
 
     public void close() {
-	_curator.close();
-	_curator = null;
+        _curator.close();
+        _curator = null;
     }
 }
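
ZkState is constructed from a conf map carrying the TRANSACTIONAL_ZOOKEEPER_* and STORM_ZOOKEEPER_* settings that newCurator reads. A hedged usage fragment (server, port and the /kafkastorm path are placeholders; the usual java.util and backtype.storm.Config imports are assumed):

    Map stateConf = new HashMap();
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("localhost"));
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, 2181);
    stateConf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
    stateConf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
    stateConf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 100);

    ZkState state = new ZkState(stateConf);
    Map<Object, Object> data = new HashMap<Object, Object>();
    data.put("offset", 42L);
    state.writeJSON("/kafkastorm/example", data);
    Map<Object, Object> readBack = state.readJSON("/kafkastorm/example");
    state.close();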

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/Coordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/Coordinator.java b/src/jvm/storm/kafka/trident/Coordinator.java
index b7ddd3f..d97feed 100644
--- a/src/jvm/storm/kafka/trident/Coordinator.java
+++ b/src/jvm/storm/kafka/trident/Coordinator.java
@@ -11,26 +11,26 @@ import java.util.Map;
  */
 class Coordinator implements IPartitionedTridentSpout.Coordinator<GlobalPartitionInformation>, IOpaquePartitionedTridentSpout.Coordinator<GlobalPartitionInformation> {
 
-	private IBrokerReader reader;
-	private TridentKafkaConfig config;
-
-	public Coordinator(Map conf, TridentKafkaConfig tridentKafkaConfig) {
-		config = tridentKafkaConfig;
-		reader = KafkaUtils.makeBrokerReader(conf, config);
-	}
-
-	@Override
-	public void close() {
-		config.coordinator.close();
-	}
-
-	@Override
-	public boolean isReady(long txid) {
-		return config.coordinator.isReady(txid);
-	}
-
-	@Override
-	public GlobalPartitionInformation getPartitionsForBatch() {
-		return reader.getCurrentBrokers();
-	}
+    private IBrokerReader reader;
+    private TridentKafkaConfig config;
+
+    public Coordinator(Map conf, TridentKafkaConfig tridentKafkaConfig) {
+        config = tridentKafkaConfig;
+        reader = KafkaUtils.makeBrokerReader(conf, config);
+    }
+
+    @Override
+    public void close() {
+        config.coordinator.close();
+    }
+
+    @Override
+    public boolean isReady(long txid) {
+        return config.coordinator.isReady(txid);
+    }
+
+    @Override
+    public GlobalPartitionInformation getPartitionsForBatch() {
+        return reader.getCurrentBrokers();
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/DefaultCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/DefaultCoordinator.java b/src/jvm/storm/kafka/trident/DefaultCoordinator.java
index 3a47706..89cd503 100644
--- a/src/jvm/storm/kafka/trident/DefaultCoordinator.java
+++ b/src/jvm/storm/kafka/trident/DefaultCoordinator.java
@@ -10,5 +10,5 @@ public class DefaultCoordinator implements IBatchCoordinator {
     @Override
     public void close() {
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
index 139a0d7..6b0fdec 100644
--- a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
+++ b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
@@ -1,7 +1,7 @@
 package storm.kafka.trident;
 
-import storm.kafka.Partition;
 import storm.kafka.HostPort;
+import storm.kafka.Partition;
 
 import java.io.Serializable;
 import java.util.*;
@@ -12,55 +12,55 @@ import java.util.*;
  */
 public class GlobalPartitionInformation implements Iterable<Partition>, Serializable {
 
-	private Map<Integer, HostPort> partitionMap;
+    private Map<Integer, HostPort> partitionMap;
 
-	public GlobalPartitionInformation() {
-		partitionMap = new TreeMap<Integer, HostPort>();
-	}
+    public GlobalPartitionInformation() {
+        partitionMap = new TreeMap<Integer, HostPort>();
+    }
 
-	public void addPartition(int partitionId, HostPort broker) {
-		partitionMap.put(partitionId, broker);
-	}
+    public void addPartition(int partitionId, HostPort broker) {
+        partitionMap.put(partitionId, broker);
+    }
 
-	@Override
-	public String toString() {
-		return "GlobalPartitionInformation{" +
-				"partitionMap=" + partitionMap +
-				'}';
-	}
+    @Override
+    public String toString() {
+        return "GlobalPartitionInformation{" +
+                "partitionMap=" + partitionMap +
+                '}';
+    }
 
-	public HostPort getHostFor(Integer partitionId) {
-		return partitionMap.get(partitionId);
-	}
+    public HostPort getHostFor(Integer partitionId) {
+        return partitionMap.get(partitionId);
+    }
 
-	public List<Partition> getOrderedPartitions(){
-		List<Partition> partitions = new LinkedList<Partition>();
-		for (Map.Entry<Integer, HostPort> partition : partitionMap.entrySet()) {
-			partitions.add(new Partition(partition.getValue(), partition.getKey()));
-		}
-		return partitions;
-	}
+    public List<Partition> getOrderedPartitions() {
+        List<Partition> partitions = new LinkedList<Partition>();
+        for (Map.Entry<Integer, HostPort> partition : partitionMap.entrySet()) {
+            partitions.add(new Partition(partition.getValue(), partition.getKey()));
+        }
+        return partitions;
+    }
 
-	@Override
-	public Iterator<Partition> iterator() {
-		final Iterator<Map.Entry<Integer, HostPort>> iterator = partitionMap.entrySet().iterator();
+    @Override
+    public Iterator<Partition> iterator() {
+        final Iterator<Map.Entry<Integer, HostPort>> iterator = partitionMap.entrySet().iterator();
 
-		return new Iterator<Partition>() {
-			@Override
-			public boolean hasNext() {
-				return iterator.hasNext();
-			}
+        return new Iterator<Partition>() {
+            @Override
+            public boolean hasNext() {
+                return iterator.hasNext();
+            }
 
-			@Override
-			public Partition next() {
-				Map.Entry<Integer, HostPort> next = iterator.next();
-				return new Partition(next.getValue(), next.getKey());
-			}
+            @Override
+            public Partition next() {
+                Map.Entry<Integer, HostPort> next = iterator.next();
+                return new Partition(next.getValue(), next.getKey());
+            }
 
-			@Override
-			public void remove() {
-				iterator.remove();
-			}
-		};
-	}
+            @Override
+            public void remove() {
+                iterator.remove();
+            }
+        };
+    }
 }
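
A short usage sketch for the class above (broker addresses are placeholders, and
HostPort is assumed to have a (host, port) constructor, matching the host/port
fields used elsewhere in this series). Because the backing map is a TreeMap,
iteration and getOrderedPartitions() yield partitions in ascending id order:

    GlobalPartitionInformation info = new GlobalPartitionInformation();
    info.addPartition(0, new HostPort("kafka1.example.com", 9092));
    info.addPartition(1, new HostPort("kafka2.example.com", 9092));
    for (Partition p : info) {
        // each Partition pairs a broker with a partition id
        System.out.println(p.getId() + " lives on " + info.getHostFor(p.partition));
    }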

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/IBatchCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/IBatchCoordinator.java b/src/jvm/storm/kafka/trident/IBatchCoordinator.java
index 9199a8d..1b8a8ce 100644
--- a/src/jvm/storm/kafka/trident/IBatchCoordinator.java
+++ b/src/jvm/storm/kafka/trident/IBatchCoordinator.java
@@ -4,5 +4,6 @@ import java.io.Serializable;
 
 public interface IBatchCoordinator extends Serializable {
     boolean isReady(long txid);
+
     void close();
 }
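
DefaultCoordinator answers isReady with an unconditional true, so every
transaction attempt gets a batch. As a sketch of the contract only, here is a
hypothetical coordinator that spaces batches at least intervalMs apart; note
that TridentKafkaConfig currently pins its coordinator field to
DefaultCoordinator (see commit [42/50] below), so plugging in a custom one
would require a config change:

    // Hypothetical: admit a new batch at most once every intervalMs milliseconds.
    public class ThrottledCoordinator implements IBatchCoordinator {
        private final long intervalMs;
        private long lastBatchMs = 0;

        public ThrottledCoordinator(long intervalMs) {
            this.intervalMs = intervalMs;
        }

        @Override
        public boolean isReady(long txid) {
            long now = System.currentTimeMillis();
            if (now - lastBatchMs >= intervalMs) {
                lastBatchMs = now;
                return true;
            }
            return false;
        }

        @Override
        public void close() {
        }
    }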

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/IBrokerReader.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/IBrokerReader.java b/src/jvm/storm/kafka/trident/IBrokerReader.java
index 4e2421b..73c9738 100644
--- a/src/jvm/storm/kafka/trident/IBrokerReader.java
+++ b/src/jvm/storm/kafka/trident/IBrokerReader.java
@@ -3,5 +3,6 @@ package storm.kafka.trident;
 public interface IBrokerReader {
 
     GlobalPartitionInformation getCurrentBrokers();
+
     void close();
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/KafkaUtils.java b/src/jvm/storm/kafka/trident/KafkaUtils.java
index efe4fef..18dd851 100644
--- a/src/jvm/storm/kafka/trident/KafkaUtils.java
+++ b/src/jvm/storm/kafka/trident/KafkaUtils.java
@@ -16,33 +16,33 @@ import java.util.Set;
 
 public class KafkaUtils {
     public static final Logger LOG = LoggerFactory.getLogger(KafkaUtils.class);
-	private static final int NO_OFFSET = -5;
+    private static final int NO_OFFSET = -5;
 
 
-	public static IBrokerReader makeBrokerReader(Map stormConf, KafkaConfig conf) {
-		if(conf.hosts instanceof StaticHosts) {
-			return new StaticBrokerReader(((StaticHosts) conf.hosts).getPartitionInformation());
-		} else {
-			return new ZkBrokerReader(stormConf, conf.topic, (ZkHosts) conf.hosts);
-		}
-	}
+    public static IBrokerReader makeBrokerReader(Map stormConf, KafkaConfig conf) {
+        if (conf.hosts instanceof StaticHosts) {
+            return new StaticBrokerReader(((StaticHosts) conf.hosts).getPartitionInformation());
+        } else {
+            return new ZkBrokerReader(stormConf, conf.topic, (ZkHosts) conf.hosts);
+        }
+    }
 
-	public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
-		TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
-		Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
-		requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
-		OffsetRequest request = new OffsetRequest(
-				requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
+    public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
+        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
+        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
+        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
+        OffsetRequest request = new OffsetRequest(
+                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
 
-		long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
-		if ( offsets.length > 0) {
-			return offsets[0];
-		} else {
-			return NO_OFFSET;
-		}
-	}
+        long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
+        if (offsets.length > 0) {
+            return offsets[0];
+        } else {
+            return NO_OFFSET;
+        }
+    }
 
-	public static class KafkaOffsetMetric implements IMetric {
+    public static class KafkaOffsetMetric implements IMetric {
         Map<Partition, Long> _partitionToOffset = new HashMap<Partition, Long>();
         Set<Partition> _partitions;
         String _topic;
@@ -64,16 +64,16 @@ public class KafkaUtils {
                 long totalLatestTimeOffset = 0;
                 long totalLatestEmittedOffset = 0;
                 HashMap ret = new HashMap();
-                if(_partitions != null && _partitions.size() == _partitionToOffset.size()) {
-                    for(Map.Entry<Partition, Long> e : _partitionToOffset.entrySet()) {
+                if (_partitions != null && _partitions.size() == _partitionToOffset.size()) {
+                    for (Map.Entry<Partition, Long> e : _partitionToOffset.entrySet()) {
                         Partition partition = e.getKey();
                         SimpleConsumer consumer = _connections.getConnection(partition);
-                        if(consumer == null) {
+                        if (consumer == null) {
                             LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
                             return null;
                         }
                         long latestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
-                        if(latestTimeOffset == 0) {
+                        if (latestTimeOffset == 0) {
                             LOG.warn("No data found in Kafka Partition " + partition.getId());
                             return null;
                         }
@@ -93,18 +93,20 @@
                 } else {
                     LOG.info("Metrics Tick: Not enough data to calculate spout lag.");
                 }
-            } catch(Throwable t) {
+            } catch (Throwable t) {
                 LOG.warn("Metrics Tick: Exception when computing kafkaOffset metric.", t);
             }
             return null;
         }
 
-       public void refreshPartitions(Set<Partition> partitions) {
-           _partitions = partitions;
-           Iterator<Partition> it = _partitionToOffset.keySet().iterator();
-           while(it.hasNext()) {
-               if(!partitions.contains(it.next())) it.remove();
-           }
-       }
-    };
+        public void refreshPartitions(Set<Partition> partitions) {
+            _partitions = partitions;
+            Iterator<Partition> it = _partitionToOffset.keySet().iterator();
+            while (it.hasNext()) {
+                if (!partitions.contains(it.next())) {
+                    it.remove();
+                }
+            }
+        }
+    }
 }
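
A sketch of calling getOffset directly against a broker; the host, port, and
client id are placeholders, and the SimpleConsumer constructor is the Kafka 0.8
javaapi one this module compiles against:

    SimpleConsumer consumer = new SimpleConsumer("kafka1.example.com", 9092,
            100000, 64 * 1024, "offset-lookup");
    long earliest = KafkaUtils.getOffset(consumer, "test", 0,
            kafka.api.OffsetRequest.EarliestTime());
    long latest = KafkaUtils.getOffset(consumer, "test", 0,
            kafka.api.OffsetRequest.LatestTime());
    // NO_OFFSET (-5) comes back when the broker returns no offsets for the request.
    consumer.close();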

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/MaxMetric.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/MaxMetric.java b/src/jvm/storm/kafka/trident/MaxMetric.java
index 087245f..a8f88ba 100644
--- a/src/jvm/storm/kafka/trident/MaxMetric.java
+++ b/src/jvm/storm/kafka/trident/MaxMetric.java
@@ -11,8 +11,12 @@ public class MaxMetric implements ICombiner<Long> {
 
     @Override
     public Long combine(Long l1, Long l2) {
-        if(l1 == null) return l2;
-        if(l2 == null) return l1;
+        if (l1 == null) {
+            return l2;
+        }
+        if (l2 == null) {
+            return l1;
+        }
         return Math.max(l1, l2);
     }
 
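MaxMetric is the combiner behind the kafkaFetchMax metric that
TridentKafkaEmitter registers below; the null handling lets a window with no
updates report "no data" instead of throwing. Registration and update pattern,
with the metric name taken from the emitter and the update values illustrative:

    // Inside a component with access to the TopologyContext:
    CombinedMetric maxFetch = context.registerMetric("kafkaFetchMax", new MaxMetric(), 60);
    maxFetch.update(12L);
    maxFetch.update(45L);   // this 60-second window reports 45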

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java b/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
index 0f6e6c5..35b7033 100644
--- a/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
+++ b/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
@@ -11,19 +11,19 @@ import java.util.UUID;
 
 public class OpaqueTridentKafkaSpout implements IOpaquePartitionedTridentSpout<GlobalPartitionInformation, Partition, Map> {
 
-    
+
     TridentKafkaConfig _config;
     String _topologyInstanceId = UUID.randomUUID().toString();
-    
+
     public OpaqueTridentKafkaSpout(TridentKafkaConfig config) {
         _config = config;
     }
-    
+
     @Override
     public IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map> getEmitter(Map conf, TopologyContext context) {
-		return new TridentKafkaEmitter(conf, context, _config, _topologyInstanceId).asOpaqueEmitter();
+        return new TridentKafkaEmitter(conf, context, _config, _topologyInstanceId).asOpaqueEmitter();
     }
-    
+
     @Override
     public IOpaquePartitionedTridentSpout.Coordinator getCoordinator(Map conf, TopologyContext tc) {
         return new storm.kafka.trident.Coordinator(conf, _config);
@@ -32,8 +32,8 @@ public class OpaqueTridentKafkaSpout implements IOpaquePartitionedTridentSpout<G
     @Override
     public Fields getOutputFields() {
         return _config.scheme.getOutputFields();
-    }    
-    
+    }
+
     @Override
     public Map<String, Object> getComponentConfiguration() {
         return null;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/StaticBrokerReader.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/StaticBrokerReader.java b/src/jvm/storm/kafka/trident/StaticBrokerReader.java
index dabbd5e..98a8f53 100644
--- a/src/jvm/storm/kafka/trident/StaticBrokerReader.java
+++ b/src/jvm/storm/kafka/trident/StaticBrokerReader.java
@@ -2,12 +2,12 @@ package storm.kafka.trident;
 
 public class StaticBrokerReader implements IBrokerReader {
 
-	private GlobalPartitionInformation brokers = new GlobalPartitionInformation();
-    
+    private GlobalPartitionInformation brokers = new GlobalPartitionInformation();
+
     public StaticBrokerReader(GlobalPartitionInformation partitionInformation) {
         this.brokers = partitionInformation;
     }
-    
+
     @Override
     public GlobalPartitionInformation getCurrentBrokers() {
         return brokers;

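StaticBrokerReader is what makeBrokerReader above returns when the config
carries a StaticHosts. A sketch of a fully static, ZooKeeper-free setup,
assuming StaticHosts wraps a GlobalPartitionInformation as its use in
makeBrokerReader suggests (addresses are placeholders):

    GlobalPartitionInformation info = new GlobalPartitionInformation();
    info.addPartition(0, new HostPort("kafka1.example.com", 9092));
    TridentKafkaConfig conf = new TridentKafkaConfig(new StaticHosts(info), "test");
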
http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java b/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
index 173a98f..b32d301 100644
--- a/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
+++ b/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
@@ -10,7 +10,7 @@ import java.util.UUID;
 
 
 public class TransactionalTridentKafkaSpout implements IPartitionedTridentSpout<GlobalPartitionInformation, Partition, Map> {
-    
+
     TridentKafkaConfig _config;
     String _topologyInstanceId = UUID.randomUUID().toString();
 
@@ -26,14 +26,14 @@ public class TransactionalTridentKafkaSpout implements IPartitionedTridentSpout<
 
     @Override
     public IPartitionedTridentSpout.Emitter getEmitter(Map conf, TopologyContext context) {
-		return new TridentKafkaEmitter(conf, context, _config, _topologyInstanceId).asTransactionalEmitter();
+        return new TridentKafkaEmitter(conf, context, _config, _topologyInstanceId).asTransactionalEmitter();
     }
 
     @Override
     public Fields getOutputFields() {
-		return _config.scheme.getOutputFields();
+        return _config.scheme.getOutputFields();
     }
-        
+
     @Override
     public Map<String, Object> getComponentConfiguration() {
         return null;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/TridentKafkaConfig.java b/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
index 7195500..073afa2 100644
--- a/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
+++ b/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
@@ -13,8 +13,8 @@ public class TridentKafkaConfig extends KafkaConfig {
         super(hosts, topic);
     }
 
-	public TridentKafkaConfig(BrokerHosts hosts, String topic, String clientId) {
-		super(hosts, topic, clientId);
-	}
+    public TridentKafkaConfig(BrokerHosts hosts, String topic, String clientId) {
+        super(hosts, topic, clientId);
+    }
 
 }
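
Putting the pieces together, a minimal sketch of wiring the opaque spout into a
Trident topology. The ZooKeeper connect string and stream name are placeholders,
and SchemeAsMultiScheme/StringScheme are the scheme classes from Storm and this
module respectively:

    BrokerHosts hosts = new ZkHosts("zkserver:2181");
    TridentKafkaConfig spoutConf = new TridentKafkaConfig(hosts, "test", "my-client");
    spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());

    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(spoutConf);
    TridentTopology topology = new TridentTopology();
    topology.newStream("kafka-spout", spout)
            .each(spout.getOutputFields(), new Debug());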

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
index 282e67a..ab4ec63 100644
--- a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
+++ b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
@@ -34,237 +34,241 @@ import java.util.Map;
  */
 public class TridentKafkaEmitter {
 
-	public static final Logger LOG = LoggerFactory.getLogger(TridentKafkaEmitter.class);
-
-	private DynamicPartitionConnections _connections;
-	private String _topologyName;
-	private KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric;
-	private ReducedMetric _kafkaMeanFetchLatencyMetric;
-	private CombinedMetric _kafkaMaxFetchLatencyMetric;
-	private TridentKafkaConfig _config;
-	private String _topologyInstanceId;
-
-	public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
-		_config = config;
-		_topologyInstanceId = topologyInstanceId;
-		_connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
-		_topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
-		_kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_config.topic, _connections);
-		context.registerMetric("kafkaOffset", _kafkaOffsetMetric, 60);
-		_kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), 60);
-		_kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), 60);
-	}
-
-
-	private Map failFastEmitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
-		SimpleConsumer consumer = _connections.register(partition);
-		Map ret = doEmitNewPartitionBatch(consumer, partition, collector, lastMeta);
-		_kafkaOffsetMetric.setLatestEmittedOffset(partition, (Long) ret.get("offset"));
-		return ret;
-	}
-
-	private Map emitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
-		try {
-			return failFastEmitNewPartitionBatch(attempt, collector, partition, lastMeta);
-		} catch (FailedFetchException e) {
-			LOG.warn("Failed to fetch from partition " + partition);
-			if (lastMeta == null) {
-				return null;
-			} else {
-				Map ret = new HashMap();
-				ret.put("offset", lastMeta.get("nextOffset"));
-				ret.put("nextOffset", lastMeta.get("nextOffset"));
-				ret.put("partition", partition.partition);
-				ret.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
-				ret.put("topic", _config.topic);
-				ret.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
-				return ret;
-			}
-		}
-	}
-
-	private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
-		long offset;
-		if (lastMeta != null) {
-			String lastInstanceId = null;
-			Map lastTopoMeta = (Map) lastMeta.get("topology");
-			if (lastTopoMeta != null) {
-				lastInstanceId = (String) lastTopoMeta.get("id");
-			}
-			if (_config.forceFromStart && !_topologyInstanceId.equals(lastInstanceId)) {
-				offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config.startOffsetTime);
-			} else {
-				offset = (Long) lastMeta.get("nextOffset");
-			}
-		} else {
-			long startTime = kafka.api.OffsetRequest.LatestTime();
-			if (_config.forceFromStart) startTime = _config.startOffsetTime;
-			offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, startTime);
-		}
-		ByteBufferMessageSet msgs;
-		try {
-			msgs = fetchMessages(consumer, partition, offset);
-		} catch (Exception e) {
-			if (e instanceof ConnectException) {
-				throw new FailedFetchException(e);
-			} else {
-				throw new RuntimeException(e);
-			}
-		}
-		long endoffset = offset;
-		for (MessageAndOffset msg : msgs) {
-			emit(collector, msg.message());
-			endoffset = msg.nextOffset();
-		}
-		Map newMeta = new HashMap();
-		newMeta.put("offset", offset);
-		newMeta.put("nextOffset", endoffset);
-		newMeta.put("instanceId", _topologyInstanceId);
-		newMeta.put("partition", partition.partition);
-		newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
-		newMeta.put("topic", _config.topic);
-		newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
-		return newMeta;
-	}
-
-	private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
-		ByteBufferMessageSet msgs;
-		long start = System.nanoTime();
-		FetchRequestBuilder builder = new FetchRequestBuilder();
-		FetchRequest fetchRequest = builder.addFetch(_config.topic, partition.partition, offset, _config.fetchSizeBytes).clientId(_config.clientId).build();
-		msgs = consumer.fetch(fetchRequest).messageSet(_config.topic, partition.partition);
-		long end = System.nanoTime();
-		long millis = (end - start) / 1000000;
-		_kafkaMeanFetchLatencyMetric.update(millis);
-		_kafkaMaxFetchLatencyMetric.update(millis);
-		return msgs;
-	}
-
-	/**
-	 * re-emit the batch described by the meta data provided
-	 *
-	 * @param attempt
-	 * @param collector
-	 * @param partition
-	 * @param meta
-	 */
-	private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
-		LOG.info("re-emitting batch, attempt " + attempt);
-		String instanceId = (String) meta.get("instanceId");
-		if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
-			SimpleConsumer consumer = _connections.register(partition);
-			long offset = (Long) meta.get("offset");
-			long nextOffset = (Long) meta.get("nextOffset");
-			ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
-			for (MessageAndOffset msg : msgs) {
-				if (offset == nextOffset) break;
-				if (offset > nextOffset) {
-					throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
-				}
-				emit(collector, msg.message());
-				offset = msg.nextOffset();
-			}
-		}
-	}
-
-	private void emit(TridentCollector collector, Message msg) {
-		Iterable<List<Object>> values =
-				_config.scheme.deserialize(Utils.toByteArray(msg.payload()));
-		if (values != null) {
-			for (List<Object> value : values) {
-				collector.emit(value);
-			}
-		}
-	}
-
-	private void clear() {
-		_connections.clear();
-	}
-
-	private List<Partition> orderPartitions(GlobalPartitionInformation partitions) {
-		return partitions.getOrderedPartitions();
-	}
-
-	private void refresh(List<Partition> list) {
-		_connections.clear();
-		_kafkaOffsetMetric.refreshPartitions(new HashSet<Partition>(list));
-	}
-
-
-	public IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map> asOpaqueEmitter() {
-
-		return new IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map>() {
-
-			/**
-			 * Emit a batch of tuples for a partition/transaction.
-			 *
-			 * Return the metadata describing this batch that will be used as lastPartitionMeta
-			 * for defining the parameters of the next batch.
-			 */
-			@Override
-			public Map emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
-				return emitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
-			}
-
-			@Override
-			public void refreshPartitions(List<Partition> partitions) {
-				refresh(partitions);
-			}
-
-			@Override
-			public List<Partition> getOrderedPartitions(GlobalPartitionInformation partitionInformation) {
-				return orderPartitions(partitionInformation);
-			}
-
-			@Override
-			public void close() {
-				clear();
-			}
-		};
-	}
-
-	public IPartitionedTridentSpout.Emitter asTransactionalEmitter() {
-		return new IPartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map>() {
-
-			/**
-			 * Emit a batch of tuples for a partition/transaction that's never been emitted before.
-			 * Return the metadata that can be used to reconstruct this partition/batch in the future.
-			 */
-			@Override
-			public Map emitPartitionBatchNew(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
-				return failFastEmitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
-			}
-
-			/**
-			 * Emit a batch of tuples for a partition/transaction that has been emitted before, using
-			 * the metadata created when it was first emitted.
-			 */
-			@Override
-			public void emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
-				reEmitPartitionBatch(transactionAttempt, tridentCollector, partition, map);
-			}
-
-			/**
-			 * This method is called when this task is responsible for a new set of partitions. Should be used
-			 * to manage things like connections to brokers.
-			 */
-			@Override
-			public void refreshPartitions(List<Partition> partitions) {
-				refresh(partitions);
-			}
-
-			@Override
-			public List<Partition> getOrderedPartitions(GlobalPartitionInformation partitionInformation) {
-				return orderPartitions(partitionInformation);
-			}
-
-			@Override
-			public void close() {
-				clear();
-			}
-		};
-
-	}
+    public static final Logger LOG = LoggerFactory.getLogger(TridentKafkaEmitter.class);
+
+    private DynamicPartitionConnections _connections;
+    private String _topologyName;
+    private KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric;
+    private ReducedMetric _kafkaMeanFetchLatencyMetric;
+    private CombinedMetric _kafkaMaxFetchLatencyMetric;
+    private TridentKafkaConfig _config;
+    private String _topologyInstanceId;
+
+    public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
+        _config = config;
+        _topologyInstanceId = topologyInstanceId;
+        _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
+        _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
+        _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_config.topic, _connections);
+        context.registerMetric("kafkaOffset", _kafkaOffsetMetric, 60);
+        _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), 60);
+        _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), 60);
+    }
+
+
+    private Map failFastEmitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
+        SimpleConsumer consumer = _connections.register(partition);
+        Map ret = doEmitNewPartitionBatch(consumer, partition, collector, lastMeta);
+        _kafkaOffsetMetric.setLatestEmittedOffset(partition, (Long) ret.get("offset"));
+        return ret;
+    }
+
+    private Map emitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
+        try {
+            return failFastEmitNewPartitionBatch(attempt, collector, partition, lastMeta);
+        } catch (FailedFetchException e) {
+            LOG.warn("Failed to fetch from partition " + partition);
+            if (lastMeta == null) {
+                return null;
+            } else {
+                Map ret = new HashMap();
+                ret.put("offset", lastMeta.get("nextOffset"));
+                ret.put("nextOffset", lastMeta.get("nextOffset"));
+                ret.put("partition", partition.partition);
+                ret.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
+                ret.put("topic", _config.topic);
+                ret.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
+                return ret;
+            }
+        }
+    }
+
+    private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
+        long offset;
+        if (lastMeta != null) {
+            String lastInstanceId = null;
+            Map lastTopoMeta = (Map) lastMeta.get("topology");
+            if (lastTopoMeta != null) {
+                lastInstanceId = (String) lastTopoMeta.get("id");
+            }
+            if (_config.forceFromStart && !_topologyInstanceId.equals(lastInstanceId)) {
+                offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config.startOffsetTime);
+            } else {
+                offset = (Long) lastMeta.get("nextOffset");
+            }
+        } else {
+            long startTime = kafka.api.OffsetRequest.LatestTime();
+            if (_config.forceFromStart) {
+                startTime = _config.startOffsetTime;
+            }
+            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, startTime);
+        }
+        ByteBufferMessageSet msgs;
+        try {
+            msgs = fetchMessages(consumer, partition, offset);
+        } catch (Exception e) {
+            if (e instanceof ConnectException) {
+                throw new FailedFetchException(e);
+            } else {
+                throw new RuntimeException(e);
+            }
+        }
+        long endoffset = offset;
+        for (MessageAndOffset msg : msgs) {
+            emit(collector, msg.message());
+            endoffset = msg.nextOffset();
+        }
+        Map newMeta = new HashMap();
+        newMeta.put("offset", offset);
+        newMeta.put("nextOffset", endoffset);
+        newMeta.put("instanceId", _topologyInstanceId);
+        newMeta.put("partition", partition.partition);
+        newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
+        newMeta.put("topic", _config.topic);
+        newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
+        return newMeta;
+    }
+
+    private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
+        ByteBufferMessageSet msgs;
+        long start = System.nanoTime();
+        FetchRequestBuilder builder = new FetchRequestBuilder();
+        FetchRequest fetchRequest = builder.addFetch(_config.topic, partition.partition, offset, _config.fetchSizeBytes).clientId(_config.clientId).build();
+        msgs = consumer.fetch(fetchRequest).messageSet(_config.topic, partition.partition);
+        long end = System.nanoTime();
+        long millis = (end - start) / 1000000;
+        _kafkaMeanFetchLatencyMetric.update(millis);
+        _kafkaMaxFetchLatencyMetric.update(millis);
+        return msgs;
+    }
+
+    /**
+     * Re-emits the batch described by the metadata provided.
+     *
+     * @param attempt
+     * @param collector
+     * @param partition
+     * @param meta
+     */
+    private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
+        LOG.info("re-emitting batch, attempt " + attempt);
+        String instanceId = (String) meta.get("instanceId");
+        if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
+            SimpleConsumer consumer = _connections.register(partition);
+            long offset = (Long) meta.get("offset");
+            long nextOffset = (Long) meta.get("nextOffset");
+            ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
+            for (MessageAndOffset msg : msgs) {
+                if (offset == nextOffset) {
+                    break;
+                }
+                if (offset > nextOffset) {
+                    throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
+                }
+                emit(collector, msg.message());
+                offset = msg.nextOffset();
+            }
+        }
+    }
+
+    private void emit(TridentCollector collector, Message msg) {
+        Iterable<List<Object>> values =
+                _config.scheme.deserialize(Utils.toByteArray(msg.payload()));
+        if (values != null) {
+            for (List<Object> value : values) {
+                collector.emit(value);
+            }
+        }
+    }
+
+    private void clear() {
+        _connections.clear();
+    }
+
+    private List<Partition> orderPartitions(GlobalPartitionInformation partitions) {
+        return partitions.getOrderedPartitions();
+    }
+
+    private void refresh(List<Partition> list) {
+        _connections.clear();
+        _kafkaOffsetMetric.refreshPartitions(new HashSet<Partition>(list));
+    }
+
+
+    public IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map> asOpaqueEmitter() {
+
+        return new IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map>() {
+
+            /**
+             * Emit a batch of tuples for a partition/transaction.
+             *
+             * Return the metadata describing this batch that will be used as lastPartitionMeta
+             * for defining the parameters of the next batch.
+             */
+            @Override
+            public Map emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
+                return emitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
+            }
+
+            @Override
+            public void refreshPartitions(List<Partition> partitions) {
+                refresh(partitions);
+            }
+
+            @Override
+            public List<Partition> getOrderedPartitions(GlobalPartitionInformation partitionInformation) {
+                return orderPartitions(partitionInformation);
+            }
+
+            @Override
+            public void close() {
+                clear();
+            }
+        };
+    }
+
+    public IPartitionedTridentSpout.Emitter asTransactionalEmitter() {
+        return new IPartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map>() {
+
+            /**
+             * Emit a batch of tuples for a partition/transaction that's never been emitted before.
+             * Return the metadata that can be used to reconstruct this partition/batch in the future.
+             */
+            @Override
+            public Map emitPartitionBatchNew(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
+                return failFastEmitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
+            }
+
+            /**
+             * Emit a batch of tuples for a partition/transaction that has been emitted before, using
+             * the metadata created when it was first emitted.
+             */
+            @Override
+            public void emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
+                reEmitPartitionBatch(transactionAttempt, tridentCollector, partition, map);
+            }
+
+            /**
+             * This method is called when this task is responsible for a new set of partitions. Should be used
+             * to manage things like connections to brokers.
+             */
+            @Override
+            public void refreshPartitions(List<Partition> partitions) {
+                refresh(partitions);
+            }
+
+            @Override
+            public List<Partition> getOrderedPartitions(GlobalPartitionInformation partitionInformation) {
+                return orderPartitions(partitionInformation);
+            }
+
+            @Override
+            public void close() {
+                clear();
+            }
+        };
+
+    }
 
 
 }
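
The per-batch metadata the emitter hands back to Trident is a plain map; both
the failure path in emitNewPartitionBatch and reEmitPartitionBatch depend on
its shape. The keys below are the ones the code above writes, with illustrative
values:

    Map<String, Object> meta = ImmutableMap.<String, Object>builder()
            .put("offset", 1000L)         // first offset of this batch
            .put("nextOffset", 1050L)     // where the next batch resumes
            .put("instanceId", "<topology instance UUID>")
            .put("partition", 0)
            .put("broker", ImmutableMap.of("host", "kafka1.example.com", "port", 9092))
            .put("topic", "test")
            .put("topology", ImmutableMap.of("name", "my-topology", "id", "<topology instance UUID>"))
            .build();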


[12/50] [abbrv] git commit: calculate start offset for new topology consistently

Posted by pt...@apache.org.
calculate start offset for new topology consistently


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/5b764cd9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/5b764cd9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/5b764cd9

Branch: refs/heads/master
Commit: 5b764cd9138dd93e7382e3472b9d3d33d4b286a3
Parents: 80005ba
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Jan 18 15:25:36 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Jan 18 15:25:36 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaUtils.java                  |  9 +++++++++
 src/jvm/storm/kafka/PartitionManager.java            | 10 +++++-----
 src/jvm/storm/kafka/trident/TridentKafkaEmitter.java |  6 +-----
 3 files changed, 15 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/5b764cd9/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaUtils.java b/src/jvm/storm/kafka/KafkaUtils.java
index 5094f14..d86519c 100644
--- a/src/jvm/storm/kafka/KafkaUtils.java
+++ b/src/jvm/storm/kafka/KafkaUtils.java
@@ -36,6 +36,15 @@ public class KafkaUtils {
         }
     }
 
+
+    public static long getOffset(SimpleConsumer consumer, String topic, int partition, KafkaConfig config) {
+        long startOffsetTime = kafka.api.OffsetRequest.LatestTime();
+        if ( config.forceFromStart ) {
+            startOffsetTime = config.startOffsetTime;
+        }
+        return getOffset(consumer, topic, partition, startOffsetTime);
+    }
+
     public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
         TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
         Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/5b764cd9/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionManager.java b/src/jvm/storm/kafka/PartitionManager.java
index 0861c25..f12c0d9 100644
--- a/src/jvm/storm/kafka/PartitionManager.java
+++ b/src/jvm/storm/kafka/PartitionManager.java
@@ -73,12 +73,12 @@ public class PartitionManager {
             LOG.warn("Error reading and/or parsing at ZkNode: " + path, e);
         }
 
-        if (!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.forceFromStart) {
+        if (jsonTopologyId == null || jsonOffset == null) { // failed to parse JSON?
+            _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition, spoutConfig);
+            LOG.info("No partition information found, using configuration to determine offset");
+        } else if (!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.forceFromStart) {
             _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition, spoutConfig.startOffsetTime);
-            LOG.info("Using startOffsetTime to choose last commit offset.");
-        } else if (jsonTopologyId == null || jsonOffset == null) { // failed to parse JSON?
-            _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition, kafka.api.OffsetRequest.LatestTime());
-            LOG.info("Setting last commit offset to HEAD.");
+            LOG.info("Topology change detected and reset from start forced, using configuration to determine offset");
         } else {
             _committedTo = jsonOffset;
             LOG.info("Read last commit offset from zookeeper: " + _committedTo + "; old topology_id: " + jsonTopologyId + " - new topology_id: " + topologyInstanceId );

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/5b764cd9/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
index eceba47..fbbbd4b 100644
--- a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
+++ b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
@@ -96,11 +96,7 @@ public class TridentKafkaEmitter {
                 offset = (Long) lastMeta.get("nextOffset");
             }
         } else {
-            long startTime = kafka.api.OffsetRequest.LatestTime();
-            if (_config.forceFromStart) {
-                startTime = _config.startOffsetTime;
-            }
-            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, startTime);
+            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config);
         }
         ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
         long endoffset = offset;

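With this change the "where does a fresh consumer start?" decision lives in a
single overload, so the regular spout and the Trident emitter can no longer
drift apart. A sketch of driving it from configuration; the connect string and
ids are placeholders, and consumer stands for a SimpleConsumer as in the
earlier sketch:

    SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts("zkserver:2181"),
            "test", "/kafkastorm", "discovery");
    spoutConfig.forceFromStart = true;                                    // replay instead of tailing
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    long start = KafkaUtils.getOffset(consumer, "test", 0, spoutConfig);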

[31/50] [abbrv] git commit: update storm to 0.9.1-incubating

Posted by pt...@apache.org.
update storm to 0.9.1-incubating


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/c62d2e11
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/c62d2e11
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/c62d2e11

Branch: refs/heads/master
Commit: c62d2e115fd2fcbdb2f8030913ac8492ea59c302
Parents: 9796b52
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Tue Mar 18 11:42:48 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Tue Mar 18 11:42:48 2014 -0400

----------------------------------------------------------------------
 pom.xml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/c62d2e11/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 910041a..a9ceef1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -168,9 +168,9 @@
             </exclusions>
         </dependency>
         <dependency>
-            <groupId>storm</groupId>
-            <artifactId>storm</artifactId>
-            <version>0.9.0</version>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-core</artifactId>
+            <version>0.9.1-incubating</version>
             <scope>provided</scope>
         </dependency>
         <dependency>


[39/50] [abbrv] git commit: removed duplicated code

Posted by pt...@apache.org.
removed duplicated code


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/c8c04a6a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/c8c04a6a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/c8c04a6a

Branch: refs/heads/master
Commit: c8c04a6a6d9575f700d4c3db35927ddb5347a265
Parents: 9370c5c
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Apr 5 14:03:49 2014 +0100
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Apr 5 14:03:49 2014 +0100

----------------------------------------------------------------------
 src/jvm/storm/kafka/PartitionManager.java | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/c8c04a6a/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionManager.java b/src/jvm/storm/kafka/PartitionManager.java
index 915f0f9..03075bb 100644
--- a/src/jvm/storm/kafka/PartitionManager.java
+++ b/src/jvm/storm/kafka/PartitionManager.java
@@ -174,25 +174,20 @@ public class PartitionManager {
     }
 
     public void commit() {
-        long committedTo;
-        if (_pending.isEmpty()) {
-            committedTo = _emittedToOffset;
-        } else {
-            committedTo = _pending.first();
-        }
-        if (committedTo != _committedTo) {
-            LOG.info("Writing committed offset (" + committedTo + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
+        long lastCompletedOffset = lastCompletedOffset();
+        if (lastCompletedOffset != lastCommittedOffset()) {
+            LOG.info("Writing last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
             Map<Object, Object> data = ImmutableMap.builder()
                     .put("topology", ImmutableMap.of("id", _topologyInstanceId,
                             "name", _stormConf.get(Config.TOPOLOGY_NAME)))
-                    .put("offset", committedTo)
+                    .put("offset", lastCompletedOffset)
                     .put("partition", _partition.partition)
                     .put("broker", ImmutableMap.of("host", _partition.host.host,
                             "port", _partition.host.port))
                     .put("topic", _spoutConfig.topic).build();
             _state.writeJSON(committedPath(), data);
-            _committedTo = committedTo;
-            LOG.info("Wrote committed offset (" + committedTo + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
+            _committedTo = lastCompletedOffset;
+            LOG.info("Wrote last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
         } else {
             LOG.info("No new offset for " + _partition + " for topology: " + _topologyInstanceId);
         }

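The hunk does not show the bodies of the two helpers it introduces; a plausible
reconstruction from the code this commit replaces (oldest pending offset if any,
otherwise the emitted high-water mark):

    public long lastCompletedOffset() {
        if (_pending.isEmpty()) {
            return _emittedToOffset;
        }
        return _pending.first();
    }

    public long lastCommittedOffset() {
        return _committedTo;
    }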

[27/50] [abbrv] git commit: preparing for tag

Posted by pt...@apache.org.
preparing for tag


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/f573001d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/f573001d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/f573001d

Branch: refs/heads/master
Commit: f573001db106d6b8d6af2167bb2b88f38a3738e5
Parents: bd0cc45
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Wed Feb 26 22:08:36 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Wed Feb 26 22:16:11 2014 +0000

----------------------------------------------------------------------
 CHANGELOG.md | 4 ++++
 pom.xml      | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/f573001d/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ced0ffc..74ab824 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+## 0.4.0
+* added support for reading kafka message keys
+* configurable metrics emit interval
 ## 0.3.0
 * updated partition path in zookeeper
 * added error handling for fetch request
+

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/f573001d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 910041a..c542158 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
     <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.4.0-SNAPSHOT</version>
+    <version>0.4.0</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
     <licenses>


[42/50] [abbrv] Add storm-kafka as an external module.

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
index 0000000,0000000..89cd503
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
@@@ -1,0 -1,0 +1,14 @@@
++package storm.kafka.trident;
++
++public class DefaultCoordinator implements IBatchCoordinator {
++
++    @Override
++    public boolean isReady(long txid) {
++        return true;
++    }
++
++    @Override
++    public void close() {
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
index 0000000,0000000..6f82f62
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
@@@ -1,0 -1,0 +1,85 @@@
++package storm.kafka.trident;
++
++import storm.kafka.Broker;
++import storm.kafka.Partition;
++
++import java.io.Serializable;
++import java.util.*;
++
++import com.google.common.base.Objects;
++
++/**
++ * Date: 14/05/2013
++ * Time: 19:18
++ */
++public class GlobalPartitionInformation implements Iterable<Partition>, Serializable {
++
++    private Map<Integer, Broker> partitionMap;
++
++    public GlobalPartitionInformation() {
++        partitionMap = new TreeMap<Integer, Broker>();
++    }
++
++    public void addPartition(int partitionId, Broker broker) {
++        partitionMap.put(partitionId, broker);
++    }
++
++    @Override
++    public String toString() {
++        return "GlobalPartitionInformation{" +
++                "partitionMap=" + partitionMap +
++                '}';
++    }
++
++    public Broker getBrokerFor(Integer partitionId) {
++        return partitionMap.get(partitionId);
++    }
++
++    public List<Partition> getOrderedPartitions() {
++        List<Partition> partitions = new LinkedList<Partition>();
++        for (Map.Entry<Integer, Broker> partition : partitionMap.entrySet()) {
++            partitions.add(new Partition(partition.getValue(), partition.getKey()));
++        }
++        return partitions;
++    }
++
++    @Override
++    public Iterator<Partition> iterator() {
++        final Iterator<Map.Entry<Integer, Broker>> iterator = partitionMap.entrySet().iterator();
++
++        return new Iterator<Partition>() {
++            @Override
++            public boolean hasNext() {
++                return iterator.hasNext();
++            }
++
++            @Override
++            public Partition next() {
++                Map.Entry<Integer, Broker> next = iterator.next();
++                return new Partition(next.getValue(), next.getKey());
++            }
++
++            @Override
++            public void remove() {
++                iterator.remove();
++            }
++        };
++    }
++
++    @Override
++    public int hashCode() {
++        return Objects.hashCode(partitionMap);
++    }
++
++    @Override
++    public boolean equals(Object obj) {
++        if (this == obj) {
++            return true;
++        }
++        if (obj == null || getClass() != obj.getClass()) {
++            return false;
++        }
++        final GlobalPartitionInformation other = (GlobalPartitionInformation) obj;
++        return Objects.equal(this.partitionMap, other.partitionMap);
++    }
++}
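
Compared with the pre-move version earlier in this series, this copy replaces
HostPort with Broker and adds Guava-based value equality. Two instances
describing the same partition-to-broker mapping now compare equal, assuming
Broker likewise has a (host, port) constructor and value-based equals:

    GlobalPartitionInformation a = new GlobalPartitionInformation();
    GlobalPartitionInformation b = new GlobalPartitionInformation();
    a.addPartition(0, new Broker("kafka1.example.com", 9092));
    b.addPartition(0, new Broker("kafka1.example.com", 9092));
    assert a.equals(b) && a.hashCode() == b.hashCode();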

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
index 0000000,0000000..1b8a8ce
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
@@@ -1,0 -1,0 +1,9 @@@
++package storm.kafka.trident;
++
++import java.io.Serializable;
++
++public interface IBatchCoordinator extends Serializable {
++    boolean isReady(long txid);
++
++    void close();
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
index 0000000,0000000..73c9738
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
@@@ -1,0 -1,0 +1,8 @@@
++package storm.kafka.trident;
++
++public interface IBrokerReader {
++
++    GlobalPartitionInformation getCurrentBrokers();
++
++    void close();
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
index 0000000,0000000..a8f88ba
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
@@@ -1,0 -1,0 +1,23 @@@
++package storm.kafka.trident;
++
++
++import backtype.storm.metric.api.ICombiner;
++
++public class MaxMetric implements ICombiner<Long> {
++    @Override
++    public Long identity() {
++        return null;
++    }
++
++    @Override
++    public Long combine(Long l1, Long l2) {
++        if (l1 == null) {
++            return l2;
++        }
++        if (l2 == null) {
++            return l1;
++        }
++        return Math.max(l1, l2);
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
index 0000000,0000000..35b7033
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
@@@ -1,0 -1,0 +1,42 @@@
++package storm.kafka.trident;
++
++import backtype.storm.task.TopologyContext;
++import backtype.storm.tuple.Fields;
++import storm.kafka.Partition;
++import storm.trident.spout.IOpaquePartitionedTridentSpout;
++
++import java.util.Map;
++import java.util.UUID;
++
++
++public class OpaqueTridentKafkaSpout implements IOpaquePartitionedTridentSpout<GlobalPartitionInformation, Partition, Map> {
++
++
++    TridentKafkaConfig _config;
++    String _topologyInstanceId = UUID.randomUUID().toString();
++
++    public OpaqueTridentKafkaSpout(TridentKafkaConfig config) {
++        _config = config;
++    }
++
++    @Override
++    public IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map> getEmitter(Map conf, TopologyContext context) {
++        return new TridentKafkaEmitter(conf, context, _config, _topologyInstanceId).asOpaqueEmitter();
++    }
++
++    @Override
++    public IOpaquePartitionedTridentSpout.Coordinator getCoordinator(Map conf, TopologyContext tc) {
++        return new storm.kafka.trident.Coordinator(conf, _config);
++    }
++
++    @Override
++    public Fields getOutputFields() {
++        return _config.scheme.getOutputFields();
++    }
++
++    @Override
++    public Map<String, Object> getComponentConfiguration() {
++        return null;
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
index 0000000,0000000..98a8f53
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
@@@ -1,0 -1,0 +1,19 @@@
++package storm.kafka.trident;
++
++public class StaticBrokerReader implements IBrokerReader {
++
++    private GlobalPartitionInformation brokers = new GlobalPartitionInformation();
++
++    public StaticBrokerReader(GlobalPartitionInformation partitionInformation) {
++        this.brokers = partitionInformation;
++    }
++
++    @Override
++    public GlobalPartitionInformation getCurrentBrokers() {
++        return brokers;
++    }
++
++    @Override
++    public void close() {
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
index 0000000,0000000..b32d301
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
@@@ -1,0 -1,0 +1,41 @@@
++package storm.kafka.trident;
++
++import backtype.storm.task.TopologyContext;
++import backtype.storm.tuple.Fields;
++import storm.kafka.Partition;
++import storm.trident.spout.IPartitionedTridentSpout;
++
++import java.util.Map;
++import java.util.UUID;
++
++
++public class TransactionalTridentKafkaSpout implements IPartitionedTridentSpout<GlobalPartitionInformation, Partition, Map> {
++
++    TridentKafkaConfig _config;
++    String _topologyInstanceId = UUID.randomUUID().toString();
++
++    public TransactionalTridentKafkaSpout(TridentKafkaConfig config) {
++        _config = config;
++    }
++
++
++    @Override
++    public IPartitionedTridentSpout.Coordinator getCoordinator(Map conf, TopologyContext context) {
++        return new storm.kafka.trident.Coordinator(conf, _config);
++    }
++
++    @Override
++    public IPartitionedTridentSpout.Emitter getEmitter(Map conf, TopologyContext context) {
++        return new TridentKafkaEmitter(conf, context, _config, _topologyInstanceId).asTransactionalEmitter();
++    }
++
++    @Override
++    public Fields getOutputFields() {
++        return _config.scheme.getOutputFields();
++    }
++
++    @Override
++    public Map<String, Object> getComponentConfiguration() {
++        return null;
++    }
++}
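
Construction is identical to the opaque variant; the difference is replay semantics,
since a re-emitted transactional batch must contain exactly the tuples it held the
first time (enforced in TridentKafkaEmitter below). Reusing the placeholder config
from the opaque sketch:

    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(spoutConf);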

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
index 0000000,0000000..073afa2
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
@@@ -1,0 -1,0 +1,20 @@@
++package storm.kafka.trident;
++
++import storm.kafka.BrokerHosts;
++import storm.kafka.KafkaConfig;
++
++
++public class TridentKafkaConfig extends KafkaConfig {
++
++
++    public final IBatchCoordinator coordinator = new DefaultCoordinator();
++
++    public TridentKafkaConfig(BrokerHosts hosts, String topic) {
++        super(hosts, topic);
++    }
++
++    public TridentKafkaConfig(BrokerHosts hosts, String topic, String clientId) {
++        super(hosts, topic, clientId);
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
index 0000000,0000000..973ce8f
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
@@@ -1,0 -1,0 +1,256 @@@
++package storm.kafka.trident;
++
++import backtype.storm.Config;
++import backtype.storm.metric.api.CombinedMetric;
++import backtype.storm.metric.api.MeanReducer;
++import backtype.storm.metric.api.ReducedMetric;
++import backtype.storm.task.TopologyContext;
++import backtype.storm.utils.Utils;
++import com.google.common.collect.ImmutableMap;
++import kafka.javaapi.consumer.SimpleConsumer;
++import kafka.javaapi.message.ByteBufferMessageSet;
++import kafka.message.Message;
++import kafka.message.MessageAndOffset;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.DynamicPartitionConnections;
++import storm.kafka.FailedFetchException;
++import storm.kafka.KafkaUtils;
++import storm.kafka.Partition;
++import storm.trident.operation.TridentCollector;
++import storm.trident.spout.IOpaquePartitionedTridentSpout;
++import storm.trident.spout.IPartitionedTridentSpout;
++import storm.trident.topology.TransactionAttempt;
++
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Map;
++
++/**
++ * Date: 21/05/2013
++ * Time: 08:38
++ */
++public class TridentKafkaEmitter {
++
++    public static final Logger LOG = LoggerFactory.getLogger(TridentKafkaEmitter.class);
++
++    private DynamicPartitionConnections _connections;
++    private String _topologyName;
++    private KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric;
++    private ReducedMetric _kafkaMeanFetchLatencyMetric;
++    private CombinedMetric _kafkaMaxFetchLatencyMetric;
++    private TridentKafkaConfig _config;
++    private String _topologyInstanceId;
++
++    public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
++        _config = config;
++        _topologyInstanceId = topologyInstanceId;
++        _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
++        _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
++        _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_config.topic, _connections);
++        context.registerMetric("kafkaOffset", _kafkaOffsetMetric, _config.metricsTimeBucketSizeInSecs);
++        _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), _config.metricsTimeBucketSizeInSecs);
++        _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), _config.metricsTimeBucketSizeInSecs);
++    }
++
++
++    private Map failFastEmitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
++        SimpleConsumer consumer = _connections.register(partition);
++        Map ret = doEmitNewPartitionBatch(consumer, partition, collector, lastMeta);
++        _kafkaOffsetMetric.setLatestEmittedOffset(partition, (Long) ret.get("offset"));
++        return ret;
++    }
++
++    private Map emitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
++        try {
++            return failFastEmitNewPartitionBatch(attempt, collector, partition, lastMeta);
++        } catch (FailedFetchException e) {
++            LOG.warn("Failed to fetch from partition " + partition);
++            if (lastMeta == null) {
++                return null;
++            } else {
++                Map ret = new HashMap();
++                ret.put("offset", lastMeta.get("nextOffset"));
++                ret.put("nextOffset", lastMeta.get("nextOffset"));
++                ret.put("partition", partition.partition);
++                ret.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
++                ret.put("topic", _config.topic);
++                ret.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
++                return ret;
++            }
++        }
++    }
++
++    private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
++        long offset;
++        if (lastMeta != null) {
++            String lastInstanceId = null;
++            Map lastTopoMeta = (Map) lastMeta.get("topology");
++            if (lastTopoMeta != null) {
++                lastInstanceId = (String) lastTopoMeta.get("id");
++            }
++            if (_config.forceFromStart && !_topologyInstanceId.equals(lastInstanceId)) {
++                offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config.startOffsetTime);
++            } else {
++                offset = (Long) lastMeta.get("nextOffset");
++            }
++        } else {
++            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config);
++        }
++        ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
++        long endoffset = offset;
++        for (MessageAndOffset msg : msgs) {
++            emit(collector, msg.message());
++            endoffset = msg.nextOffset();
++        }
++        Map newMeta = new HashMap();
++        newMeta.put("offset", offset);
++        newMeta.put("nextOffset", endoffset);
++        newMeta.put("instanceId", _topologyInstanceId);
++        newMeta.put("partition", partition.partition);
++        newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
++        newMeta.put("topic", _config.topic);
++        newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
++        return newMeta;
++    }
++
++    private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
++        long start = System.nanoTime();
++        ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
++        long end = System.nanoTime();
++        long millis = (end - start) / 1000000;
++        _kafkaMeanFetchLatencyMetric.update(millis);
++        _kafkaMaxFetchLatencyMetric.update(millis);
++        return msgs;
++    }
++
++    /**
++     * Re-emit the batch described by the metadata provided.
++     *
++     * @param attempt
++     * @param collector
++     * @param partition
++     * @param meta
++     */
++    private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
++        LOG.info("re-emitting batch, attempt " + attempt);
++        String instanceId = (String) meta.get("instanceId");
++        if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
++            SimpleConsumer consumer = _connections.register(partition);
++            long offset = (Long) meta.get("offset");
++            long nextOffset = (Long) meta.get("nextOffset");
++            ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
++            for (MessageAndOffset msg : msgs) {
++                if (offset == nextOffset) {
++                    break;
++                }
++                if (offset > nextOffset) {
++                    throw new RuntimeException("Error when re-emitting batch: overshot the end offset");
++                }
++                emit(collector, msg.message());
++                offset = msg.nextOffset();
++            }
++        }
++    }
++
++    private void emit(TridentCollector collector, Message msg) {
++        Iterable<List<Object>> values = KafkaUtils.generateTuples(_config, msg);
++        if (values != null) {
++            for (List<Object> value : values) {
++                collector.emit(value);
++            }
++        }
++    }
++
++    private void clear() {
++        _connections.clear();
++    }
++
++    private List<Partition> orderPartitions(GlobalPartitionInformation partitions) {
++        return partitions.getOrderedPartitions();
++    }
++
++    private void refresh(List<Partition> list) {
++        _connections.clear();
++        _kafkaOffsetMetric.refreshPartitions(new HashSet<Partition>(list));
++    }
++
++
++    public IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map> asOpaqueEmitter() {
++
++        return new IOpaquePartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map>() {
++
++            /**
++             * Emit a batch of tuples for a partition/transaction.
++             *
++             * Return the metadata describing this batch that will be used as lastPartitionMeta
++             * for defining the parameters of the next batch.
++             */
++            @Override
++            public Map emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
++                return emitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
++            }
++
++            @Override
++            public void refreshPartitions(List<Partition> partitions) {
++                refresh(partitions);
++            }
++
++            @Override
++            public List<Partition> getOrderedPartitions(GlobalPartitionInformation partitionInformation) {
++                return orderPartitions(partitionInformation);
++            }
++
++            @Override
++            public void close() {
++                clear();
++            }
++        };
++    }
++
++    public IPartitionedTridentSpout.Emitter asTransactionalEmitter() {
++        return new IPartitionedTridentSpout.Emitter<GlobalPartitionInformation, Partition, Map>() {
++
++            /**
++             * Emit a batch of tuples for a partition/transaction that's never been emitted before.
++             * Return the metadata that can be used to reconstruct this partition/batch in the future.
++             */
++            @Override
++            public Map emitPartitionBatchNew(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
++                return failFastEmitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
++            }
++
++            /**
++             * Emit a batch of tuples for a partition/transaction that has been emitted before, using
++             * the metadata created when it was first emitted.
++             */
++            @Override
++            public void emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
++                reEmitPartitionBatch(transactionAttempt, tridentCollector, partition, map);
++            }
++
++            /**
++             * This method is called when this task is responsible for a new set of partitions. Should be used
++             * to manage things like connections to brokers.
++             */
++            @Override
++            public void refreshPartitions(List<Partition> partitions) {
++                refresh(partitions);
++            }
++
++            @Override
++            public List<Partition> getOrderedPartitions(GlobalPartitionInformation partitionInformation) {
++                return orderPartitions(partitionInformation);
++            }
++
++            @Override
++            public void close() {
++                clear();
++            }
++        };
++
++    }
++
++
++}
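
For reference, the per-batch metadata returned by doEmitNewPartitionBatch is a plain
Map; a sketch of one batch's shape, with illustrative values only:

    // Keys match doEmitNewPartitionBatch above; all values here are made up.
    Map<String, Object> meta = new HashMap<String, Object>();
    meta.put("offset", 100L);        // first offset in this batch
    meta.put("nextOffset", 150L);    // where the next batch will start
    meta.put("instanceId", "de305d54-75b4-431b-adb2-eb6b9e546014"); // emitting instance's UUID
    meta.put("partition", 0);
    meta.put("broker", ImmutableMap.of("host", "localhost", "port", 9092));
    meta.put("topic", "testTopic");
    meta.put("topology", ImmutableMap.of("name", "my-topology",
            "id", "de305d54-75b4-431b-adb2-eb6b9e546014"));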

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
index 0000000,0000000..5e2361d
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
@@@ -1,0 -1,0 +1,45 @@@
++package storm.kafka.trident;
++
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.DynamicBrokersReader;
++import storm.kafka.ZkHosts;
++
++import java.util.Map;
++
++
++public class ZkBrokerReader implements IBrokerReader {
++
++    public static final Logger LOG = LoggerFactory.getLogger(ZkBrokerReader.class);
++
++    GlobalPartitionInformation cachedBrokers;
++    DynamicBrokersReader reader;
++    long lastRefreshTimeMs;
++
++
++    long refreshMillis;
++
++    public ZkBrokerReader(Map conf, String topic, ZkHosts hosts) {
++        reader = new DynamicBrokersReader(conf, hosts.brokerZkStr, hosts.brokerZkPath, topic);
++        cachedBrokers = reader.getBrokerInfo();
++        lastRefreshTimeMs = System.currentTimeMillis();
++        refreshMillis = hosts.refreshFreqSecs * 1000L;
++
++    }
++
++    @Override
++    public GlobalPartitionInformation getCurrentBrokers() {
++        long currTime = System.currentTimeMillis();
++        if (currTime > lastRefreshTimeMs + refreshMillis) {
++            LOG.info("brokers need refreshing because " + refreshMillis + "ms have elapsed");
++            cachedBrokers = reader.getBrokerInfo();
++            lastRefreshTimeMs = currTime;
++        }
++        return cachedBrokers;
++    }
++
++    @Override
++    public void close() {
++        reader.close();
++    }
++}
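
getCurrentBrokers only re-reads ZooKeeper once hosts.refreshFreqSecs has passed;
between refreshes, callers get the cached snapshot. A sketch of the observable
behaviour (conf and the connection string are placeholders):

    ZkBrokerReader reader = new ZkBrokerReader(conf, "testTopic", new ZkHosts("localhost:2181"));
    GlobalPartitionInformation first = reader.getCurrentBrokers(); // cached by the constructor
    GlobalPartitionInformation again = reader.getCurrentBrokers(); // same snapshot, no ZK round-trip
    // ... refreshFreqSecs seconds later, the next call re-reads ZooKeeper ...
    reader.close();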

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/DynamicBrokersReaderTest.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/DynamicBrokersReaderTest.java
index 0000000,0000000..d03bab3
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/DynamicBrokersReaderTest.java
@@@ -1,0 -1,0 +1,155 @@@
++package storm.kafka;
++
++import backtype.storm.Config;
++import com.netflix.curator.framework.CuratorFramework;
++import com.netflix.curator.framework.CuratorFrameworkFactory;
++import com.netflix.curator.retry.ExponentialBackoffRetry;
++import com.netflix.curator.test.TestingServer;
++import com.netflix.curator.utils.ZKPaths;
++import org.junit.After;
++import org.junit.Before;
++import org.junit.Test;
++import storm.kafka.trident.GlobalPartitionInformation;
++
++import java.util.HashMap;
++import java.util.Map;
++
++import static org.junit.Assert.assertEquals;
++
++/**
++ * Date: 16/05/2013
++ * Time: 20:35
++ */
++public class DynamicBrokersReaderTest {
++    private DynamicBrokersReader dynamicBrokersReader;
++    private String masterPath = "/brokers";
++    private String topic = "testing";
++    private CuratorFramework zookeeper;
++    private TestingServer server;
++
++    @Before
++    public void setUp() throws Exception {
++        server = new TestingServer();
++        String connectionString = server.getConnectString();
++        Map conf = new HashMap();
++        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
++        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
++        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
++        ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
++        zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
++        dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
++        zookeeper.start();
++    }
++
++    @After
++    public void tearDown() throws Exception {
++        dynamicBrokersReader.close();
++        zookeeper.close();
++        server.close();
++    }
++
++    private void addPartition(int id, String host, int port) throws Exception {
++        writePartitionId(id);
++        writeLeader(id, 0);
++        writeLeaderDetails(0, host, port);
++    }
++
++    private void addPartition(int id, int leader, String host, int port) throws Exception {
++        writePartitionId(id);
++        writeLeader(id, leader);
++        writeLeaderDetails(leader, host, port);
++    }
++
++    private void writePartitionId(int id) throws Exception {
++        String path = dynamicBrokersReader.partitionPath();
++        writeDataToPath(path, ("" + id));
++    }
++
++    private void writeDataToPath(String path, String data) throws Exception {
++        ZKPaths.mkdirs(zookeeper.getZookeeperClient().getZooKeeper(), path);
++        zookeeper.setData().forPath(path, data.getBytes());
++    }
++
++    private void writeLeader(int id, int leaderId) throws Exception {
++        String path = dynamicBrokersReader.partitionPath() + "/" + id + "/state";
++        String value = " { \"controller_epoch\":4, \"isr\":[ 1, 0 ], \"leader\":" + leaderId + ", \"leader_epoch\":1, \"version\":1 }";
++        writeDataToPath(path, value);
++    }
++
++    private void writeLeaderDetails(int leaderId, String host, int port) throws Exception {
++        String path = dynamicBrokersReader.brokerPath() + "/" + leaderId;
++        String value = "{ \"host\":\"" + host + "\", \"jmx_port\":9999, \"port\":" + port + ", \"version\":1 }";
++        writeDataToPath(path, value);
++    }
++
++    @Test
++    public void testGetBrokerInfo() throws Exception {
++        String host = "localhost";
++        int port = 9092;
++        int partition = 0;
++        addPartition(partition, host, port);
++        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
++        assertEquals(1, brokerInfo.getOrderedPartitions().size());
++        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
++        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
++    }
++
++
++    @Test
++    public void testMultiplePartitionsOnDifferentHosts() throws Exception {
++        String host = "localhost";
++        int port = 9092;
++        int secondPort = 9093;
++        int partition = 0;
++        int secondPartition = partition + 1;
++        addPartition(partition, 0, host, port);
++        addPartition(secondPartition, 1, host, secondPort);
++
++        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
++        assertEquals(2, brokerInfo.getOrderedPartitions().size());
++
++        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
++        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
++
++        assertEquals(secondPort, brokerInfo.getBrokerFor(secondPartition).port);
++        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
++    }
++
++
++    @Test
++    public void testMultiplePartitionsOnSameHost() throws Exception {
++        String host = "localhost";
++        int port = 9092;
++        int partition = 0;
++        int secondPartition = partition + 1;
++        addPartition(partition, 0, host, port);
++        addPartition(secondPartition, 0, host, port);
++
++        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
++        assertEquals(2, brokerInfo.getOrderedPartitions().size());
++
++        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
++        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
++
++        assertEquals(port, brokerInfo.getBrokerFor(secondPartition).port);
++        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
++    }
++
++    @Test
++    public void testSwitchHostForPartition() throws Exception {
++        String host = "localhost";
++        int port = 9092;
++        int partition = 0;
++        addPartition(partition, host, port);
++        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
++        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
++        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
++
++        String newHost = host + "switch";
++        int newPort = port + 1;
++        addPartition(partition, newHost, newPort);
++        brokerInfo = dynamicBrokersReader.getBrokerInfo();
++        assertEquals(newPort, brokerInfo.getBrokerFor(partition).port);
++        assertEquals(newHost, brokerInfo.getBrokerFor(partition).host);
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/KafkaErrorTest.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/KafkaErrorTest.java
index 0000000,0000000..e5f9db2
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/KafkaErrorTest.java
@@@ -1,0 -1,0 +1,39 @@@
++package storm.kafka;
++
++import org.junit.Test;
++
++import static org.hamcrest.CoreMatchers.equalTo;
++import static org.hamcrest.CoreMatchers.is;
++import static org.junit.Assert.assertThat;
++
++/**
++ * Date: 12/01/2014
++ * Time: 18:09
++ */
++public class KafkaErrorTest {
++
++    @Test
++    public void getError() {
++        assertThat(KafkaError.getError(0), is(equalTo(KafkaError.NO_ERROR)));
++    }
++
++    @Test
++    public void offsetMetaDataTooLarge() {
++        assertThat(KafkaError.getError(12), is(equalTo(KafkaError.OFFSET_METADATA_TOO_LARGE)));
++    }
++
++    @Test
++    public void unknownNegative() {
++        assertThat(KafkaError.getError(-1), is(equalTo(KafkaError.UNKNOWN)));
++    }
++
++    @Test
++    public void unknownPositive() {
++        assertThat(KafkaError.getError(75), is(equalTo(KafkaError.UNKNOWN)));
++    }
++
++    @Test
++    public void unknown() {
++        assertThat(KafkaError.getError(13), is(equalTo(KafkaError.UNKNOWN)));
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
index 0000000,0000000..d2a44a4
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
@@@ -1,0 -1,0 +1,53 @@@
++package storm.kafka;
++
++import com.netflix.curator.framework.CuratorFramework;
++import com.netflix.curator.framework.CuratorFrameworkFactory;
++import com.netflix.curator.retry.ExponentialBackoffRetry;
++import com.netflix.curator.test.TestingServer;
++import kafka.server.KafkaServerStartable;
++
++import java.util.Properties;
++
++/**
++ * Date: 11/01/2014
++ * Time: 13:15
++ */
++public class KafkaTestBroker {
++
++    private final int port = 49123;
++    private KafkaServerStartable kafka;
++    private TestingServer server;
++    private String zookeeperConnectionString;
++
++    public KafkaTestBroker() {
++        try {
++            server = new TestingServer();
++            zookeeperConnectionString = server.getConnectString();
++            ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
++            CuratorFramework zookeeper = CuratorFrameworkFactory.newClient(zookeeperConnectionString, retryPolicy);
++            zookeeper.start();
++            Properties p = new Properties();
++            p.setProperty("zookeeper.connect", zookeeperConnectionString);
++            p.setProperty("broker.id", "0");
++            p.setProperty("port", "" + port);
++            kafka.server.KafkaConfig config = new kafka.server.KafkaConfig(p);
++            kafka = new KafkaServerStartable(config);
++            kafka.startup();
++        } catch (Exception ex) {
++            throw new RuntimeException("Could not start test broker", ex);
++        }
++    }
++
++    public String getBrokerConnectionString() {
++        return "localhost:" + port;
++    }
++
++    public int getPort() {
++        return port;
++    }
++
++    public void shutdown() {
++        kafka.shutdown();
++        server.stop();
++    }
++}
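
The tests below use this broker roughly as follows:

    KafkaTestBroker broker = new KafkaTestBroker(); // boots an in-process ZooKeeper plus one Kafka broker
    SimpleConsumer consumer =
            new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
    // ... run the code under test against broker.getBrokerConnectionString() ...
    consumer.close();
    broker.shutdown();                              // stop Kafka, then the ZooKeeper test server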

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/KafkaUtilsTest.java
index 0000000,0000000..25fa618
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/KafkaUtilsTest.java
@@@ -1,0 -1,0 +1,221 @@@
++package storm.kafka;
++
++import backtype.storm.spout.SchemeAsMultiScheme;
++import backtype.storm.utils.Utils;
++import com.google.common.collect.ImmutableMap;
++import kafka.api.OffsetRequest;
++import kafka.javaapi.consumer.SimpleConsumer;
++import kafka.javaapi.message.ByteBufferMessageSet;
++import kafka.javaapi.producer.Producer;
++import kafka.message.MessageAndOffset;
++import kafka.producer.KeyedMessage;
++import kafka.producer.ProducerConfig;
++import org.junit.After;
++import org.junit.Before;
++import org.junit.Test;
++import storm.kafka.trident.GlobalPartitionInformation;
++
++import java.util.List;
++import java.util.Properties;
++
++import static org.hamcrest.CoreMatchers.equalTo;
++import static org.hamcrest.CoreMatchers.is;
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertThat;
++
++public class KafkaUtilsTest {
++
++    private KafkaTestBroker broker;
++    private SimpleConsumer simpleConsumer;
++    private KafkaConfig config;
++    private BrokerHosts brokerHosts;
++
++    @Before
++    public void setup() {
++        broker = new KafkaTestBroker();
++        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
++        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
++        brokerHosts = new StaticHosts(globalPartitionInformation);
++        config = new KafkaConfig(brokerHosts, "testTopic");
++        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
++    }
++
++    @After
++    public void shutdown() {
++        simpleConsumer.close();
++        broker.shutdown();
++    }
++
++
++    @Test(expected = FailedFetchException.class)
++    public void topicDoesNotExist() throws Exception {
++        KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), 0);
++    }
++
++    @Test(expected = FailedFetchException.class)
++    public void brokerIsDown() throws Exception {
++        int port = broker.getPort();
++        broker.shutdown();
++        SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", port, 100, 1024, "testClient");
++        KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), OffsetRequest.LatestTime());
++    }
++
++    @Test
++    public void fetchMessage() throws Exception {
++        String value = "test";
++        createTopicAndSendMessage(value);
++        long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
++        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
++                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
++        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
++        assertThat(message, is(equalTo(value)));
++    }
++
++    @Test(expected = FailedFetchException.class)
++    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingDisabled() throws Exception {
++        config.useStartOffsetTimeIfOffsetOutOfRange = false;
++        KafkaUtils.fetchMessages(config, simpleConsumer,
++                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), -99);
++    }
++
++    @Test
++    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingEnabled() throws Exception {
++        config = new KafkaConfig(brokerHosts, "newTopic");
++        String value = "test";
++        createTopicAndSendMessage(value);
++        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
++                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), -99);
++        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
++        assertThat(message, is(equalTo(value)));
++    }
++
++    @Test
++    public void getOffsetFromConfigAndDontForceFromStart() {
++        config.forceFromStart = false;
++        config.startOffsetTime = OffsetRequest.EarliestTime();
++        createTopicAndSendMessage();
++        long latestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime());
++        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
++        assertThat(latestOffset, is(equalTo(offsetFromConfig)));
++    }
++
++    @Test
++    public void getOffsetFromConfigAndForceFromStart() {
++        config.forceFromStart = true;
++        config.startOffsetTime = OffsetRequest.EarliestTime();
++        createTopicAndSendMessage();
++        long earliestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
++        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
++        assertThat(earliestOffset, is(equalTo(offsetFromConfig)));
++    }
++
++    @Test
++    public void generateTuplesWithoutKeyAndKeyValueScheme() {
++        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
++        runGetValueOnlyTuplesTest();
++    }
++
++    @Test
++    public void generateTuplesWithKeyAndKeyValueScheme() {
++        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
++        String value = "value";
++        String key = "key";
++        createTopicAndSendMessage(key, value);
++        ByteBufferMessageSet messageAndOffsets = getLastMessage();
++        for (MessageAndOffset msg : messageAndOffsets) {
++            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
++            assertEquals(ImmutableMap.of(key, value), lists.iterator().next().get(0));
++        }
++    }
++
++    @Test
++    public void generateTuplesWithValueScheme() {
++        config.scheme = new SchemeAsMultiScheme(new StringScheme());
++        runGetValueOnlyTuplesTest();
++    }
++
++    @Test
++    public void generateTuplesWithValueSchemeAndKeyValueMessage() {
++        config.scheme = new SchemeAsMultiScheme(new StringScheme());
++        String value = "value";
++        String key = "key";
++        createTopicAndSendMessage(key, value);
++        ByteBufferMessageSet messageAndOffsets = getLastMessage();
++        for (MessageAndOffset msg : messageAndOffsets) {
++            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
++            assertEquals(value, lists.iterator().next().get(0));
++        }
++    }
++
++    private ByteBufferMessageSet getLastMessage() {
++        long offsetOfLastMessage = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
++        return KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offsetOfLastMessage);
++    }
++
++    private void runGetValueOnlyTuplesTest() {
++        String value = "value";
++        createTopicAndSendMessage(null, value);
++        ByteBufferMessageSet messageAndOffsets = getLastMessage();
++        for (MessageAndOffset msg : messageAndOffsets) {
++            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
++            assertEquals(value, lists.iterator().next().get(0));
++        }
++    }
++
++
++    private void createTopicAndSendMessage() {
++        createTopicAndSendMessage(null, "someValue");
++    }
++
++    private void createTopicAndSendMessage(String value) {
++        createTopicAndSendMessage(null, value);
++    }
++
++    private void createTopicAndSendMessage(String key, String value) {
++        Properties p = new Properties();
++        p.setProperty("metadata.broker.list", "localhost:49123");
++        p.setProperty("serializer.class", "kafka.serializer.StringEncoder");
++        ProducerConfig producerConfig = new ProducerConfig(p);
++        Producer<String, String> producer = new Producer<String, String>(producerConfig);
++        producer.send(new KeyedMessage<String, String>(config.topic, key, value));
++    }
++
++
++    @Test
++    public void assignOnePartitionPerTask() {
++        runPartitionToTaskMappingTest(16, 1);
++    }
++
++    @Test
++    public void assignTwoPartitionsPerTask() {
++        runPartitionToTaskMappingTest(16, 2);
++    }
++
++    @Test
++    public void assignAllPartitionsToOneTask() {
++        runPartitionToTaskMappingTest(32, 32);
++    }
++
++
++    public void runPartitionToTaskMappingTest(int numPartitions, int partitionsPerTask) {
++        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(numPartitions);
++        int numTasks = numPartitions / partitionsPerTask;
++        for (int i = 0; i < numTasks; i++) {
++            assertEquals(partitionsPerTask, KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, numTasks, i).size());
++        }
++    }
++
++    @Test
++    public void moreTasksThanPartitions() {
++        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(1);
++        int numTasks = 2;
++        assertEquals(1, KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, numTasks, 0).size());
++        assertEquals(0, KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, numTasks, 1).size());
++    }
++
++    @Test(expected = IllegalArgumentException.class)
++    public void assignInvalidTask() {
++        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
++        KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, 1, 1);
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/StringKeyValueSchemeTest.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/StringKeyValueSchemeTest.java
index 0000000,0000000..4413c7b
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/StringKeyValueSchemeTest.java
@@@ -1,0 -1,0 +1,38 @@@
++package storm.kafka;
++
++import backtype.storm.tuple.Fields;
++import com.google.common.collect.ImmutableMap;
++import org.junit.Test;
++
++import java.util.Arrays;
++
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertTrue;
++
++public class StringKeyValueSchemeTest {
++
++    private StringKeyValueScheme scheme = new StringKeyValueScheme();
++
++    @Test
++    public void testDeserialize() throws Exception {
++        assertEquals(Arrays.asList("test"), scheme.deserialize("test".getBytes()));
++    }
++
++    @Test
++    public void testGetOutputFields() throws Exception {
++        Fields outputFields = scheme.getOutputFields();
++        assertTrue(outputFields.contains(StringScheme.STRING_SCHEME_KEY));
++        assertEquals(1, outputFields.size());
++    }
++
++    @Test
++    public void testDeserializeWithNullKeyAndValue() throws Exception {
++        assertEquals(Arrays.asList("test"), scheme.deserializeKeyAndValue(null, "test".getBytes()));
++    }
++
++    @Test
++    public void testDeserializeWithKeyAndValue() throws Exception {
++        assertEquals(Arrays.asList(ImmutableMap.of("key", "test")),
++                scheme.deserializeKeyAndValue("key".getBytes(), "test".getBytes()));
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/TestUtils.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/TestUtils.java
index 0000000,0000000..860d96d
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/TestUtils.java
@@@ -1,0 -1,0 +1,20 @@@
++package storm.kafka;
++
++import storm.kafka.trident.GlobalPartitionInformation;
++
++public class TestUtils {
++
++    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions) {
++        return buildPartitionInfo(numPartitions, 9092);
++    }
++
++
++    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions, int brokerPort) {
++        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
++        for (int i = 0; i < numPartitions; i++) {
++            globalPartitionInformation.addPartition(i, Broker.fromString("broker-" + i + ":" + brokerPort));
++        }
++        return globalPartitionInformation;
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/ZkCoordinatorTest.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/ZkCoordinatorTest.java
index 0000000,0000000..c08ce82
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/ZkCoordinatorTest.java
@@@ -1,0 -1,0 +1,130 @@@
++package storm.kafka;
++
++import backtype.storm.Config;
++import com.netflix.curator.test.TestingServer;
++import kafka.javaapi.consumer.SimpleConsumer;
++import org.junit.After;
++import org.junit.Before;
++import org.junit.Test;
++import org.mockito.Mock;
++import org.mockito.MockitoAnnotations;
++
++import java.util.*;
++
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertNotEquals;
++import static org.mockito.Matchers.any;
++import static org.mockito.Matchers.anyInt;
++import static org.mockito.Mockito.when;
++
++public class ZkCoordinatorTest {
++
++
++    @Mock
++    private DynamicBrokersReader reader;
++
++    @Mock
++    private DynamicPartitionConnections dynamicPartitionConnections;
++
++    private KafkaTestBroker broker = new KafkaTestBroker();
++    private TestingServer server;
++    private Map stormConf = new HashMap();
++    private SpoutConfig spoutConfig;
++    private ZkState state;
++    private SimpleConsumer simpleConsumer;
++
++    @Before
++    public void setUp() throws Exception {
++        MockitoAnnotations.initMocks(this);
++        server = new TestingServer();
++        String connectionString = server.getConnectString();
++        ZkHosts hosts = new ZkHosts(connectionString);
++        hosts.refreshFreqSecs = 1;
++        spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
++        Map conf = buildZookeeperConfig(server);
++        state = new ZkState(conf);
++        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
++        when(dynamicPartitionConnections.register(any(Broker.class), anyInt())).thenReturn(simpleConsumer);
++    }
++
++    private Map buildZookeeperConfig(TestingServer server) {
++        Map conf = new HashMap();
++        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, server.getPort());
++        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("localhost"));
++        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
++        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
++        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 30);
++        return conf;
++    }
++
++    @After
++    public void shutdown() throws Exception {
++        simpleConsumer.close();
++        broker.shutdown();
++        server.stop();
++    }
++
++    @Test
++    public void testOnePartitionPerTask() throws Exception {
++        int totalTasks = 64;
++        int partitionsPerTask = 1;
++        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
++        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfo(totalTasks));
++        for (ZkCoordinator coordinator : coordinatorList) {
++            List<PartitionManager> myManagedPartitions = coordinator.getMyManagedPartitions();
++            assertEquals(partitionsPerTask, myManagedPartitions.size());
++            assertEquals(coordinator._taskIndex, myManagedPartitions.get(0).getPartition().partition);
++        }
++    }
++
++
++    @Test
++    public void testPartitionsChange() throws Exception {
++        final int totalTasks = 64;
++        int partitionsPerTask = 2;
++        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
++        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfo(totalTasks, 9092));
++        List<List<PartitionManager>> partitionManagersBeforeRefresh = getPartitionManagers(coordinatorList);
++        waitForRefresh();
++        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfo(totalTasks, 9093));
++        List<List<PartitionManager>> partitionManagersAfterRefresh = getPartitionManagers(coordinatorList);
++        assertEquals(partitionManagersBeforeRefresh.size(), partitionManagersAfterRefresh.size());
++        Iterator<List<PartitionManager>> iterator = partitionManagersAfterRefresh.iterator();
++        for (List<PartitionManager> partitionManagersBefore : partitionManagersBeforeRefresh) {
++            List<PartitionManager> partitionManagersAfter = iterator.next();
++            assertPartitionsAreDifferent(partitionManagersBefore, partitionManagersAfter, partitionsPerTask);
++        }
++    }
++
++    private void assertPartitionsAreDifferent(List<PartitionManager> partitionManagersBefore, List<PartitionManager> partitionManagersAfter, int partitionsPerTask) {
++        assertEquals(partitionsPerTask, partitionManagersBefore.size());
++        assertEquals(partitionManagersBefore.size(), partitionManagersAfter.size());
++        for (int i = 0; i < partitionsPerTask; i++) {
++            assertNotEquals(partitionManagersBefore.get(i).getPartition(), partitionManagersAfter.get(i).getPartition());
++        }
++
++    }
++
++    private List<List<PartitionManager>> getPartitionManagers(List<ZkCoordinator> coordinatorList) {
++        List<List<PartitionManager>> partitions = new ArrayList<List<PartitionManager>>();
++        for (ZkCoordinator coordinator : coordinatorList) {
++            partitions.add(coordinator.getMyManagedPartitions());
++        }
++        return partitions;
++    }
++
++    private void waitForRefresh() throws InterruptedException {
++        Thread.sleep(((ZkHosts) spoutConfig.hosts).refreshFreqSecs * 1000 + 1);
++    }
++
++    private List<ZkCoordinator> buildCoordinators(int totalTasks) {
++        List<ZkCoordinator> coordinatorList = new ArrayList<ZkCoordinator>();
++        for (int i = 0; i < totalTasks; i++) {
++            ZkCoordinator coordinator = new ZkCoordinator(dynamicPartitionConnections, stormConf, spoutConfig, state, i, totalTasks, "test-id", reader);
++            coordinatorList.add(coordinator);
++        }
++        return coordinatorList;
++    }
++
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/test/storm/kafka/bolt/KafkaBoltTest.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/test/storm/kafka/bolt/KafkaBoltTest.java
index 0000000,0000000..fa5a104
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/test/storm/kafka/bolt/KafkaBoltTest.java
@@@ -1,0 -1,0 +1,171 @@@
++package storm.kafka.bolt;
++
++import backtype.storm.Config;
++import backtype.storm.task.GeneralTopologyContext;
++import backtype.storm.task.IOutputCollector;
++import backtype.storm.task.OutputCollector;
++import backtype.storm.topology.TopologyBuilder;
++import backtype.storm.tuple.Fields;
++import backtype.storm.tuple.Tuple;
++import backtype.storm.tuple.TupleImpl;
++import backtype.storm.tuple.Values;
++import backtype.storm.utils.Utils;
++import kafka.api.OffsetRequest;
++import kafka.javaapi.consumer.SimpleConsumer;
++import kafka.javaapi.message.ByteBufferMessageSet;
++import kafka.message.Message;
++import kafka.message.MessageAndOffset;
++import org.junit.After;
++import org.junit.Before;
++import org.junit.Test;
++import org.mockito.Mock;
++import org.mockito.MockitoAnnotations;
++import storm.kafka.*;
++import storm.kafka.trident.GlobalPartitionInformation;
++
++import java.nio.ByteBuffer;
++import java.util.HashMap;
++import java.util.Properties;
++
++import static org.junit.Assert.assertEquals;
++import static org.mockito.Mockito.verify;
++
++public class KafkaBoltTest {
++
++    private static final String TEST_TOPIC = "test-topic";
++    private KafkaTestBroker broker;
++    private KafkaBolt bolt;
++    private Config config = new Config();
++    private KafkaConfig kafkaConfig;
++    private SimpleConsumer simpleConsumer;
++
++    @Mock
++    private IOutputCollector collector;
++
++    @Before
++    public void initMocks() {
++        MockitoAnnotations.initMocks(this);
++        broker = new KafkaTestBroker();
++        setupKafkaConsumer();
++        config.put(KafkaBolt.TOPIC, TEST_TOPIC);
++        bolt = generateStringSerializerBolt();
++    }
++
++    @After
++    public void shutdown() {
++        simpleConsumer.close();
++        broker.shutdown();
++    }
++
++
++    private void setupKafkaConsumer() {
++        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
++        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
++        BrokerHosts brokerHosts = new StaticHosts(globalPartitionInformation);
++        kafkaConfig = new KafkaConfig(brokerHosts, TEST_TOPIC);
++        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
++    }
++
++    @Test
++    public void executeWithKey() throws Exception {
++        String message = "value-123";
++        String key = "key-123";
++        Tuple tuple = generateTestTuple(key, message);
++        bolt.execute(tuple);
++        verify(collector).ack(tuple);
++        verifyMessage(key, message);
++    }
++
++    @Test
++    public void executeWithByteArrayKeyAndMessage() {
++        bolt = generateDefaultSerializerBolt();
++        String keyString = "test-key";
++        String messageString = "test-message";
++        byte[] key = keyString.getBytes();
++        byte[] message = messageString.getBytes();
++        Tuple tuple = generateTestTuple(key, message);
++        bolt.execute(tuple);
++        verify(collector).ack(tuple);
++        verifyMessage(keyString, messageString);
++    }
++
++    private KafkaBolt generateStringSerializerBolt() {
++        KafkaBolt bolt = new KafkaBolt();
++        Properties props = new Properties();
++        props.put("metadata.broker.list", broker.getBrokerConnectionString());
++        props.put("request.required.acks", "1");
++        props.put("serializer.class", "kafka.serializer.StringEncoder");
++        config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
++        bolt.prepare(config, null, new OutputCollector(collector));
++        return bolt;
++    }
++
++    private KafkaBolt generateDefaultSerializerBolt() {
++        KafkaBolt bolt = new KafkaBolt();
++        Properties props = new Properties();
++        props.put("metadata.broker.list", broker.getBrokerConnectionString());
++        props.put("request.required.acks", "1");
++        config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
++        bolt.prepare(config, null, new OutputCollector(collector));
++        return bolt;
++    }
++
++    @Test
++    public void executeWithoutKey() throws Exception {
++        String message = "value-234";
++        Tuple tuple = generateTestTuple(message);
++        bolt.execute(tuple);
++        verify(collector).ack(tuple);
++        verifyMessage(null, message);
++    }
++
++
++    @Test
++    public void executeWithBrokerDown() throws Exception {
++        broker.shutdown();
++        String message = "value-234";
++        Tuple tuple = generateTestTuple(message);
++        bolt.execute(tuple);
++        verify(collector).ack(tuple);
++    }
++
++
++    private boolean verifyMessage(String key, String message) {
++        long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
++        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
++                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
++        MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
++        Message kafkaMessage = messageAndOffset.message();
++        ByteBuffer messageKeyBuffer = kafkaMessage.key();
++        String keyString = null;
++        String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
++        if (messageKeyBuffer != null) {
++            keyString = new String(Utils.toByteArray(messageKeyBuffer));
++        }
++        assertEquals(key, keyString);
++        assertEquals(message, messageString);
++        return true;
++    }
++
++    private Tuple generateTestTuple(Object key, Object message) {
++        TopologyBuilder builder = new TopologyBuilder();
++        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
++            @Override
++            public Fields getComponentOutputFields(String componentId, String streamId) {
++                return new Fields("key", "message");
++            }
++        };
++        return new TupleImpl(topologyContext, new Values(key, message), 1, "");
++    }
++
++    private Tuple generateTestTuple(Object message) {
++        TopologyBuilder builder = new TopologyBuilder();
++        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
++            @Override
++            public Fields getComponentOutputFields(String componentId, String streamId) {
++                return new Fields("message");
++            }
++        };
++        return new TupleImpl(topologyContext, new Values(message), 1, "");
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index e4129ed,e6adc5d..82d5f88
--- a/pom.xml
+++ b/pom.xml
@@@ -1,471 -1,105 +1,472 @@@
  <?xml version="1.0" encoding="UTF-8"?>
 -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 -         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 +<!--
 + Licensed to the Apache Software Foundation (ASF) under one or more
 + contributor license agreements.  See the NOTICE file distributed with
 + this work for additional information regarding copyright ownership.
 + The ASF licenses this file to You under the Apache License, Version 2.0
 + (the "License"); you may not use this file except in compliance with
 + the License.  You may obtain a copy of the License at
 +
 +     http://www.apache.org/licenses/LICENSE-2.0
 +
 + Unless required by applicable law or agreed to in writing, software
 + distributed under the License is distributed on an "AS IS" BASIS,
 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + See the License for the specific language governing permissions and
 + limitations under the License.
 +-->
 +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
      <modelVersion>4.0.0</modelVersion>
 +
 +    <parent>
 +        <groupId>org.apache</groupId>
 +        <artifactId>apache</artifactId>
 +        <version>10</version>
 +    </parent>
 +
 +
      <groupId>org.apache.storm</groupId>
 -    <artifactId>storm-kafka</artifactId>
 -    <packaging>jar</packaging>
 +    <artifactId>storm</artifactId>
      <version>0.9.2-incubating-SNAPSHOT</version>
 -    <name>storm-kafka</name>
 -    <description>Storm Spouts for Apache Kafka</description>
 +    <packaging>pom</packaging>
 +    <name>Storm</name>
 +    <description>Distributed and fault-tolerant realtime computation</description>
 +    <url>http://storm.incubator.apache.org</url>
 +    <licenses>
 +        <license>
 +            <name>The Apache Software License, Version 2.0</name>
 +            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
 +        </license>
 +    </licenses>
 +
 +    <mailingLists>
 +        <mailingList>
 +            <name>Storm user mailing list</name>
 +            <subscribe>user-subscribe@storm.incubator.apache.org</subscribe>
 +            <unsubscribe>user-unsubscribe@storm.incubator.apache.org</unsubscribe>
 +            <post>user@storm.incubator.apache.org</post>
 +            <archive>http://mail-archives.apache.org/mod_mbox/incubator-storm-user/</archive>
 +        </mailingList>
 +        <mailingList>
 +            <name>Storm developer mailing list</name>
 +            <subscribe>dev-subscribe@storm.incubator.apache.org</subscribe>
 +            <unsubscribe>dev-unsubscribe@storm.incubator.apache.org</unsubscribe>
 +            <post>dev@storm.incubator.apache.org</post>
 +            <archive>http://mail-archives.apache.org/mod_mbox/incubator-storm-dev/</archive>
 +        </mailingList>
 +    </mailingLists>
 +
 +    <developers>
 +        <developer>
 +            <id>nathanmarz</id>
 +            <name>Nathan Marz</name>
 +            <email>nathan@nathanmarz.com</email>
 +            <roles>
 +                <role>Committer</role>
 +            </roles>
 +            <timezone>-8</timezone>
 +        </developer>
 +        <developer>
 +            <id>ptgoetz</id>
 +            <name>P. Taylor Goetz</name>
 +            <email>ptgoetz@apache.org</email>
 +            <roles>
 +                <role>Committer</role>
 +            </roles>
 +            <timezone>-5</timezone>
 +        </developer>
 +        <developer>
 +            <id>xumingming</id>
 +            <name>James Xu</name>
 +            <email>xumingming@apache.org</email>
 +            <roles>
 +                <role>Committer</role>
 +            </roles>
 +            <timezone />
 +        </developer>
 +        <developer>
 +            <id>afeng</id>
 +            <name>Andy Feng</name>
 +            <email>afeng@apache.org</email>
 +            <roles>
 +                <role>Committer</role>
 +            </roles>
 +            <timezone>-8</timezone>
 +        </developer>
 +        <developer>
 +            <id>davidlao</id>
 +            <name>David Lao</name>
 +            <email>davidlao@microsoft.com</email>
 +            <roles>
 +                <role>Committer</role>
 +            </roles>
 +            <timezone>-8</timezone>
 +        </developer>
 +        <developer>
 +            <id>mrflip</id>
 +            <name>Flip Kromer</name>
 +            <email>mrflip@apache.org</email>
 +            <roles>
 +                <role>Committer</role>
 +            </roles>
 +            <timezone />
 +        </developer>
 +        <developer>
 +            <id>jjackson</id>
 +            <name>Jason Jackson</name>
 +            <email>jason@cvk.ca</email>
 +            <roles>
 +                <role>Committer</role>
 +            </roles>
 +            <timezone>-8</timezone>
 +        </developer>
 +    </developers>
 +
 +
 +    <prerequisites>
 +        <maven>3.0.0</maven>
 +    </prerequisites>
 +
 +    <modules>
 +        <module>storm-buildtools/maven-shade-clojure-transformer</module>
 +        <module>storm-core</module>
 +        <module>examples/storm-starter</module>
++        <module>external/storm-kafka</module>
 +    </modules>
 +
 +    <scm>
 +        <connection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-storm.git</connection>
 +        <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-storm.git</developerConnection>
 +        <tag>HEAD</tag>
 +        <url>https://git-wip-us.apache.org/repos/asf/incubator-storm</url>
 +    </scm>
 +
 +    <issueManagement>
 +        <system>jira</system>
 +        <url>https://issues.apache.org/jira/browse/STORM</url>
 +    </issueManagement>
 +
      <properties>
 -        <scalaVersion>2.9.2</scalaVersion>
 -        <kafkaArtifact>kafka_2.9.2</kafkaArtifact>
 -        <envClassifier></envClassifier>
 +        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
 +        <test.extra.args>-Djava.net.preferIPv4Stack=true</test.extra.args>
 +
 +        <!-- dependency versions -->
 +        <clojure.version>1.4.0</clojure.version>
 +        <compojure.version>1.1.3</compojure.version>
 +        <hiccup.version>0.3.6</hiccup.version>
 +        <commons-io.version>1.4</commons-io.version>
 +        <commons-lang.version>2.5</commons-lang.version>
 +        <commons-exec.version>1.1</commons-exec.version>
 +        <clj-time.version>0.4.1</clj-time.version>
 +        <curator.version>1.3.3</curator.version>
 +        <json-simple.version>1.1</json-simple.version>
 +        <ring.version>0.3.11</ring.version>
 +        <clojure.tools.logging.version>0.2.3</clojure.tools.logging.version>
 +        <clojure.math.numeric-tower.version>0.0.1</clojure.math.numeric-tower.version>
 +        <carbonite.version>1.4.0</carbonite.version>
 +        <snakeyaml.version>1.11</snakeyaml.version>
 +        <httpclient.version>4.1.1</httpclient.version>
 +        <clojure.tools.cli.version>0.2.2</clojure.tools.cli.version>
 +        <disruptor.version>2.10.1</disruptor.version>
 +        <jgrapht.version>0.9.0</jgrapht.version>
 +        <guava.version>13.0</guava.version>
 +        <logback-classic.version>1.0.6</logback-classic.version>
 +        <log4j-over-slf4j.version>1.6.6</log4j-over-slf4j.version>
 +        <netty.version>3.6.3.Final</netty.version>
 +        <clojure.tools.nrepl.version>0.2.3</clojure.tools.nrepl.version>
 +        <clojure-complete.version>0.2.3</clojure-complete.version>
 +        <mockito.version>1.9.5</mockito.version>
 +        <reply.version>0.3.0</reply.version>
 +        <zookeeper.version>3.4.5</zookeeper.version>
 +
      </properties>
 -    <build>
 -        <plugins>
  
 -        </plugins>
 -        <sourceDirectory>src/jvm</sourceDirectory>
 -        <testSourceDirectory>src/test</testSourceDirectory>
 -    </build>
 +    <profiles>
 +        <profile>
 +            <id>sign</id>
 +            <build>
 +                <plugins>
 +                    <plugin>
 +                        <groupId>org.apache.maven.plugins</groupId>
 +                        <artifactId>maven-gpg-plugin</artifactId>
 +                        <executions>
 +                            <execution>
 +                                <id>sign-artifacts</id>
 +                                <phase>verify</phase>
 +                                <goals>
 +                                    <goal>sign</goal>
 +                                </goals>
 +                            </execution>
 +                        </executions>
 +                    </plugin>
 +                </plugins>
 +            </build>
 +        </profile>
 +        <profile>
 +            <id>dist</id>
 +            <modules>
 +                <module>storm-dist/binary</module>
 +                <module>storm-dist/source</module>
 +            </modules>
 +            <build>
 +                <plugins>
 +                    <plugin>
 +                        <groupId>org.apache.maven.plugins</groupId>
 +                        <artifactId>maven-source-plugin</artifactId>
 +                        <executions>
 +                            <execution>
 +                                <id>attach-sources</id>
 +                                <goals>
 +                                    <goal>jar</goal>
 +                                </goals>
 +                            </execution>
 +                        </executions>
 +                    </plugin>
 +                    <plugin>
 +                        <groupId>org.apache.maven.plugins</groupId>
 +                        <artifactId>maven-javadoc-plugin</artifactId>
 +                        <executions>
 +                            <execution>
 +                                <id>attach-javadocs</id>
 +                                <goals>
 +                                    <goal>jar</goal>
 +                                </goals>
 +                            </execution>
 +                        </executions>
 +                    </plugin>
 +                    <plugin>
 +                        <groupId>org.apache.maven.plugins</groupId>
 +                        <artifactId>maven-jar-plugin</artifactId>
 +                        <configuration>
 +                            <archive>
 +                                <manifest>
 +                                    <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
 +                                    <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
 +                                </manifest>
 +                            </archive>
 +                        </configuration>
 +                    </plugin>
 +                </plugins>
 +            </build>
 +        </profile>
 +
 +    </profiles>
 +
 +    <distributionManagement>
 +        <site>
 +            <id>storm.maven.website</id>
 +            <name>Storm Website</name>
 +            <url>file:///tmp/site</url>
 +        </site>
 +    </distributionManagement>
 +
 +    <dependencyManagement>
 +        <dependencies>
 +            <dependency>
 +                <groupId>org.clojure</groupId>
 +                <artifactId>clojure</artifactId>
 +                <version>${clojure.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>commons-io</groupId>
 +                <artifactId>commons-io</artifactId>
 +                <version>${commons-io.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.apache.commons</groupId>
 +                <artifactId>commons-exec</artifactId>
 +                <version>${commons-exec.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>commons-lang</groupId>
 +                <artifactId>commons-lang</artifactId>
 +                <version>${commons-lang.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>clj-time</groupId>
 +                <artifactId>clj-time</artifactId>
 +                <version>${clj-time.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.apache.zookeeper</groupId>
 +                <artifactId>zookeeper</artifactId>
 +                <version>${zookeeper.version}</version>
 +                <exclusions>
 +                    <exclusion>
 +                        <groupId>com.sun.jmx</groupId>
 +                        <artifactId>jmxri</artifactId>
 +                    </exclusion>
 +                    <exclusion>
 +                        <groupId>com.sun.jdmk</groupId>
 +                        <artifactId>jmxtools</artifactId>
 +                    </exclusion>
 +                    <exclusion>
 +                        <groupId>javax.jms</groupId>
 +                        <artifactId>jms</artifactId>
 +                    </exclusion>
 +                    <exclusion>
 +                        <groupId>org.slf4j</groupId>
 +                        <artifactId>slf4j-api</artifactId>
 +                    </exclusion>
 +                    <exclusion>
 +                        <groupId>org.slf4j</groupId>
 +                        <artifactId>slf4j-log4j12</artifactId>
 +                    </exclusion>
 +                    <exclusion>
 +                        <groupId>log4j</groupId>
 +                        <artifactId>log4j</artifactId>
 +                    </exclusion>
 +                </exclusions>
 +            </dependency>
 +            <dependency>
 +                <groupId>com.netflix.curator</groupId>
 +                <artifactId>curator-framework</artifactId>
 +                <version>${curator.version}</version>
 +                <exclusions>
 +                    <exclusion>
 +                        <artifactId>log4j</artifactId>
 +                        <groupId>log4j</groupId>
 +                    </exclusion>
 +                </exclusions>
 +            </dependency>
 +            <dependency>
 +                <groupId>com.googlecode.json-simple</groupId>
 +                <artifactId>json-simple</artifactId>
 +                <version>${json-simple.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>compojure</groupId>
 +                <artifactId>compojure</artifactId>
 +                <version>${compojure.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>hiccup</groupId>
 +                <artifactId>hiccup</artifactId>
 +                <version>${hiccup.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>ring</groupId>
 +                <artifactId>ring-devel</artifactId>
 +                <version>${ring.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>ring</groupId>
 +                <artifactId>ring-jetty-adapter</artifactId>
 +                <version>${ring.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.clojure</groupId>
 +                <artifactId>tools.logging</artifactId>
 +                <version>${clojure.tools.logging.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.clojure</groupId>
 +                <artifactId>math.numeric-tower</artifactId>
 +                <version>${clojure.math.numeric-tower.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>com.twitter</groupId>
 +                <artifactId>carbonite</artifactId>
 +                <version>${carbonite.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.yaml</groupId>
 +                <artifactId>snakeyaml</artifactId>
 +                <version>${snakeyaml.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.apache.httpcomponents</groupId>
 +                <artifactId>httpclient</artifactId>
 +                <version>${httpclient.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.clojure</groupId>
 +                <artifactId>tools.cli</artifactId>
 +                <version>${clojure.tools.cli.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>com.googlecode.disruptor</groupId>
 +                <artifactId>disruptor</artifactId>
 +                <version>${disruptor.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.jgrapht</groupId>
 +                <artifactId>jgrapht-core</artifactId>
 +                <version>${jgrapht.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>com.google.guava</groupId>
 +                <artifactId>guava</artifactId>
 +                <version>${guava.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>ch.qos.logback</groupId>
 +                <artifactId>logback-classic</artifactId>
 +                <version>${logback-classic.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.slf4j</groupId>
 +                <artifactId>log4j-over-slf4j</artifactId>
 +                <version>${log4j-over-slf4j.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>io.netty</groupId>
 +                <artifactId>netty</artifactId>
 +                <version>${netty.version}</version>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.clojure</groupId>
 +                <artifactId>tools.nrepl</artifactId>
 +                <version>${clojure.tools.nrepl.version}</version>
 +                <scope>test</scope>
 +                <exclusions>
 +                    <exclusion>
 +                        <artifactId>clojure</artifactId>
 +                        <groupId>org.clojure</groupId>
 +                    </exclusion>
 +                </exclusions>
 +            </dependency>
 +            <dependency>
 +                <groupId>clojure-complete</groupId>
 +                <artifactId>clojure-complete</artifactId>
 +                <version>${clojure-complete.version}</version>
 +                <scope>test</scope>
 +                <exclusions>
 +                    <exclusion>
 +                        <artifactId>clojure</artifactId>
 +                        <groupId>org.clojure</groupId>
 +                    </exclusion>
 +                </exclusions>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.mockito</groupId>
 +                <artifactId>mockito-all</artifactId>
 +                <version>${mockito.version}</version>
 +                <scope>test</scope>
 +            </dependency>
 +            <dependency>
 +                <groupId>org.apache.thrift</groupId>
 +                <artifactId>libthrift</artifactId>
 +                <version>0.7.0</version>
 +                <scope>compile</scope>
 +            </dependency>
 +            <!-- used by examples/storm-starter -->
 +            <dependency>
 +                <groupId>junit</groupId>
 +                <artifactId>junit</artifactId>
 +                <version>3.8.1</version>
 +                <scope>test</scope>
 +            </dependency>
 +        </dependencies>
 +    </dependencyManagement>
 +
 +
      <dependencies>
 +        <!-- reply dependency adds leiningen-style REPL -->
          <dependency>
 -            <groupId>org.mockito</groupId>
 -            <artifactId>mockito-all</artifactId>
 -            <version>1.9.0</version>
 -            <scope>test</scope>
 -        </dependency>
 -        <dependency>
 -            <groupId>org.scala-lang</groupId>
 -            <artifactId>scala-library</artifactId>
 -            <version>${scalaVersion}</version>
 -        </dependency>
 -        <dependency>
 -            <groupId>junit</groupId>
 -            <artifactId>junit</artifactId>
 -            <version>4.11</version>
 -            <scope>test</scope>
 -        </dependency>
 -        <dependency>
 -            <groupId>com.netflix.curator</groupId>
 -            <artifactId>curator-framework</artifactId>
 -            <version>1.3.3</version>
 -            <exclusions>
 -                <exclusion>
 -                    <groupId>log4j</groupId>
 -                    <artifactId>log4j</artifactId>
 -                </exclusion>
 -                <exclusion>
 -                    <groupId>org.slf4j</groupId>
 -                    <artifactId>slf4j-log4j12</artifactId>
 -                </exclusion>
 -            </exclusions>
 -        </dependency>
 -        <dependency>
 -            <groupId>com.netflix.curator</groupId>
 -            <artifactId>curator-recipes</artifactId>
 -            <version>1.3.3</version>
 -            <exclusions>
 -                <exclusion>
 -                    <groupId>log4j</groupId>
 -                    <artifactId>log4j</artifactId>
 -                </exclusion>
 -            </exclusions>
 -            <scope>test</scope>
 -        </dependency>
 -        <dependency>
 -            <groupId>com.netflix.curator</groupId>
 -            <artifactId>curator-test</artifactId>
 -            <version>1.3.3</version>
 -            <exclusions>
 -                <exclusion>
 -                    <groupId>log4j</groupId>
 -                    <artifactId>log4j</artifactId>
 -                </exclusion>
 -                <exclusion>
 -                    <groupId>org.testng</groupId>
 -                    <artifactId>testng</artifactId>
 -                </exclusion>
 -            </exclusions>
 -            <scope>test</scope>
 -        </dependency>
 -        <dependency>
 -            <groupId>org.apache.kafka</groupId>
 -            <artifactId>${kafkaArtifact}</artifactId>
 -            <version>0.8.0</version>
 -            <exclusions>
 -                <exclusion>
 -                    <groupId>org.apache.zookeeper</groupId>
 -                    <artifactId>zookeeper</artifactId>
 -                </exclusion>
 -                <exclusion>
 -                    <groupId>log4j</groupId>
 -                    <artifactId>log4j</artifactId>
 -                </exclusion>
 -            </exclusions>
 -        </dependency>
 -        <dependency>
 -            <groupId>org.apache.storm</groupId>
 -            <artifactId>storm-core</artifactId>
 -            <version>0.9.2-incubating-SNAPSHOT</version>
 +            <groupId>reply</groupId>
 +            <artifactId>reply</artifactId>
 +            <version>${reply.version}</version>
              <scope>provided</scope>
          </dependency>
      </dependencies>


[20/50] [abbrv] git commit: Make the spout's Storm Metrics' time bucket size configurable via TridentKafkaConfig. Leave the default time bucket size at 60 seconds.

Posted by pt...@apache.org.
Make the spout's Storm Metrics' time bucket size configurable via
TridentKafkaConfig. Leave the default time bucket size at 60 seconds.

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/b6b1f1d8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/b6b1f1d8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/b6b1f1d8

Branch: refs/heads/master
Commit: b6b1f1d8fb5a56cf14c44ff46aa513e0fa4e99ab
Parents: 312408a
Author: Danijel Schiavuzzi <da...@infobip.com>
Authored: Tue Feb 11 15:28:59 2014 +0100
Committer: Danijel Schiavuzzi <da...@infobip.com>
Committed: Tue Feb 11 15:28:59 2014 +0100

----------------------------------------------------------------------
 pom.xml                                              | 4 ++--
 src/jvm/storm/kafka/KafkaConfig.java                 | 1 +
 src/jvm/storm/kafka/trident/TridentKafkaEmitter.java | 6 +++---
 3 files changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/b6b1f1d8/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 910041a..8dbd9a9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
     <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.4.0-SNAPSHOT</version>
+    <version>0.4.0-configurable-metrics-emit-interval-SNAPSHOT</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
     <licenses>
@@ -170,7 +170,7 @@
         <dependency>
             <groupId>storm</groupId>
             <artifactId>storm</artifactId>
-            <version>0.9.0</version>
+            <version>0.9.0.1</version>
             <scope>provided</scope>
         </dependency>
         <dependency>

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/b6b1f1d8/src/jvm/storm/kafka/KafkaConfig.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaConfig.java b/src/jvm/storm/kafka/KafkaConfig.java
index dddcead..8ef2a88 100644
--- a/src/jvm/storm/kafka/KafkaConfig.java
+++ b/src/jvm/storm/kafka/KafkaConfig.java
@@ -18,6 +18,7 @@ public class KafkaConfig implements Serializable {
     public boolean forceFromStart = false;
     public long startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
     public boolean useStartOffsetTimeIfOffsetOutOfRange = true;
+    public int metricsTimeBucketSizeInSecs = 60;
 
     public KafkaConfig(BrokerHosts hosts, String topic) {
         this(hosts, topic, kafka.api.OffsetRequest.DefaultClientId());

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/b6b1f1d8/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
index fbbbd4b..66785f0 100644
--- a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
+++ b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
@@ -49,9 +49,9 @@ public class TridentKafkaEmitter {
         _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
         _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
         _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_config.topic, _connections);
-        context.registerMetric("kafkaOffset", _kafkaOffsetMetric, 60);
-        _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), 60);
-        _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), 60);
+        context.registerMetric("kafkaOffset", _kafkaOffsetMetric, _config.metricsTimeBucketSizeInSecs);
+        _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), _config.metricsTimeBucketSizeInSecs);
+        _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), _config.metricsTimeBucketSizeInSecs);
     }
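
With this change a topology can widen or narrow the metrics window instead of being pinned to 60 seconds. A minimal sketch, assuming TridentKafkaConfig exposes the (BrokerHosts, String) constructor that KafkaConfig defines and that OpaqueTridentKafkaSpout accepts the config directly (host and topic names are placeholders):

    BrokerHosts hosts = new ZkHosts("zookeeper-host:2181");
    TridentKafkaConfig config = new TridentKafkaConfig(hosts, "some-topic");
    // Report kafkaOffset, kafkaFetchAvg and kafkaFetchMax every 30s instead of the default 60s.
    config.metricsTimeBucketSizeInSecs = 30;
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(config);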
 
 


[14/50] [abbrv] git commit: fix issue where Trident kept refreshing partitions

Posted by pt...@apache.org.
fix issue where Trident kept refreshing partitions


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/0a6203ac
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/0a6203ac
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/0a6203ac

Branch: refs/heads/master
Commit: 0a6203acc6a2765049af6913b8deb070fb63a244
Parents: f789091
Author: Edison Xu <xe...@gmail.com>
Authored: Wed Jan 22 15:07:04 2014 +0800
Committer: Edison Xu <xe...@gmail.com>
Committed: Wed Jan 22 15:07:04 2014 +0800

----------------------------------------------------------------------
 .../trident/GlobalPartitionInformation.java     | 26 ++++++++++++++++++++
 1 file changed, 26 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/0a6203ac/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
index a790009..e73d6b0 100644
--- a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
+++ b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
@@ -64,4 +64,30 @@ public class GlobalPartitionInformation implements Iterable<Partition>, Serializ
             }
         };
     }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result
+                + ((partitionMap == null) ? 0 : partitionMap.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        GlobalPartitionInformation other = (GlobalPartitionInformation) obj;
+        if (partitionMap == null) {
+            if (other.partitionMap != null)
+                return false;
+        } else if (!partitionMap.equals(other.partitionMap))
+            return false;
+        return true;
+    }
 }
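
The value-based equals/hashCode is what stops the needless refreshing: the spout compares cached partition information against a freshly read copy, and with the inherited identity equals that comparison always failed. An illustrative check, assuming the addPartition(int, Broker) and Broker.fromString(String) methods used elsewhere in this module:

    GlobalPartitionInformation before = new GlobalPartitionInformation();
    before.addPartition(0, Broker.fromString("broker-0:9092"));
    GlobalPartitionInformation after = new GlobalPartitionInformation();
    after.addPartition(0, Broker.fromString("broker-0:9092"));
    // Identity equality made this false and triggered a refresh even though
    // nothing had changed; value equality now returns true.
    assert before.equals(after);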


[40/50] [abbrv] git commit: Merge branch 'master' of github.com:wurstmeister/storm-kafka-0.8-plus

Posted by pt...@apache.org.
Merge branch 'master' of github.com:wurstmeister/storm-kafka-0.8-plus


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/adab172a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/adab172a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/adab172a

Branch: refs/heads/master
Commit: adab172a7d5621eb4497f9ec511f43c04991b2e3
Parents: e1b6fb4 c8c04a6
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Wed Apr 9 09:32:23 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Wed Apr 9 09:32:23 2014 -0400

----------------------------------------------------------------------
 CHANGELOG.md                                    |   3 +
 README.md                                       |  10 +-
 src/jvm/storm/kafka/KafkaUtils.java             |  35 ++++-
 src/jvm/storm/kafka/PartitionManager.java       |  34 ++---
 src/jvm/storm/kafka/StaticCoordinator.java      |   7 +-
 src/jvm/storm/kafka/ZkCoordinator.java          |  35 +++--
 .../storm/kafka/DynamicBrokersReaderTest.java   |   2 +
 src/test/storm/kafka/KafkaTestBroker.java       |   1 +
 src/test/storm/kafka/KafkaUtilsTest.java        |  40 ++++++
 src/test/storm/kafka/TestUtils.java             |  20 +++
 src/test/storm/kafka/ZkCoordinatorTest.java     | 130 +++++++++++++++++++
 src/test/storm/kafka/bolt/KafkaBoltTest.java    |   1 +
 12 files changed, 270 insertions(+), 48 deletions(-)
----------------------------------------------------------------------



[34/50] [abbrv] git commit: fixed KafkaSpout partition assignment

Posted by pt...@apache.org.
fixed KafkaSpout partition assignment

* added logging of the partition -> task mapping


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/2f45866c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/2f45866c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/2f45866c

Branch: refs/heads/master
Commit: 2f45866c8e011ac4804c940ff9e1d7c147591761
Parents: 09ae973
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Mon Mar 31 07:35:22 2014 +0100
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Mon Mar 31 07:35:22 2014 +0100

----------------------------------------------------------------------
 CHANGELOG.md                                |   2 +
 src/jvm/storm/kafka/KafkaUtils.java         |  31 +++++-
 src/jvm/storm/kafka/StaticCoordinator.java  |   7 +-
 src/jvm/storm/kafka/ZkCoordinator.java      |  37 ++++---
 src/test/storm/kafka/KafkaUtilsTest.java    |  39 +++++++
 src/test/storm/kafka/TestUtils.java         |  20 ++++
 src/test/storm/kafka/ZkCoordinatorTest.java | 128 +++++++++++++++++++++++
 7 files changed, 238 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2f45866c/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 74ab824..133ffda 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,5 @@
+## 0.5.0
+* fixed partition assignment for KafkaSpout
 ## 0.4.0
 * added support for reading kafka message keys
 * configurable metrics emit interval

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2f45866c/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaUtils.java b/src/jvm/storm/kafka/KafkaUtils.java
index eed438f..4e8b3a3 100644
--- a/src/jvm/storm/kafka/KafkaUtils.java
+++ b/src/jvm/storm/kafka/KafkaUtils.java
@@ -2,6 +2,7 @@ package storm.kafka;
 
 import backtype.storm.metric.api.IMetric;
 import backtype.storm.utils.Utils;
+import com.google.common.base.Preconditions;
 import kafka.api.FetchRequest;
 import kafka.api.FetchRequestBuilder;
 import kafka.api.PartitionOffsetRequestInfo;
@@ -13,6 +14,7 @@ import kafka.javaapi.message.ByteBufferMessageSet;
 import kafka.message.Message;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import storm.kafka.trident.GlobalPartitionInformation;
 import storm.kafka.trident.IBrokerReader;
 import storm.kafka.trident.StaticBrokerReader;
 import storm.kafka.trident.ZkBrokerReader;
@@ -39,7 +41,7 @@ public class KafkaUtils {
 
     public static long getOffset(SimpleConsumer consumer, String topic, int partition, KafkaConfig config) {
         long startOffsetTime = kafka.api.OffsetRequest.LatestTime();
-        if ( config.forceFromStart ) {
+        if (config.forceFromStart) {
             startOffsetTime = config.startOffsetTime;
         }
         return getOffset(consumer, topic, partition, startOffsetTime);
@@ -91,7 +93,7 @@ public class KafkaUtils {
                             LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
                             return null;
                         }
-                        long earliestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.EarliestTime()); 
+                        long earliestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.EarliestTime());
                         long latestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
                         if (earliestTimeOffset == 0 || latestTimeOffset == 0) {
                             LOG.warn("No data found in Kafka Partition " + partition.getId());
@@ -184,4 +186,29 @@ public class KafkaUtils {
         return tups;
     }
 
+
+    public static List<Partition> calculatePartitionsForTask(GlobalPartitionInformation partitionInformation, int totalTasks, int taskIndex) {
+        Preconditions.checkArgument(taskIndex < totalTasks, "task index must be less than total tasks");
+        List<Partition> partitions = partitionInformation.getOrderedPartitions();
+        int numPartitions = partitions.size();
+        if (numPartitions < totalTasks) {
+            LOG.warn("there are more tasks than partitions (tasks: " + totalTasks + "; partitions: " + numPartitions + "), some tasks will be idle");
+        }
+        List<Partition> taskPartitions = new ArrayList<Partition>();
+        for (int i = taskIndex; i < numPartitions; i += totalTasks) {
+            Partition taskPartition = partitions.get(i);
+            taskPartitions.add(taskPartition);
+        }
+        logPartitionMapping(totalTasks, taskIndex, taskPartitions);
+        return taskPartitions;
+    }
+
+    private static void logPartitionMapping(int totalTasks, int taskIndex, List<Partition> taskPartitions) {
+        String taskPrefix = "[" + taskIndex + "/" + totalTasks + "] --> ";
+        if (taskPartitions.isEmpty()) {
+            LOG.warn(taskPrefix + "no partitions assigned");
+        } else {
+            LOG.info(taskPrefix + "assigned " + taskPartitions);
+        }
+    }
 }
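
The loop above deals partitions out round-robin by task index. A short worked example using the TestUtils helper added in this same commit:

    // 4 partitions shared by 2 tasks:
    GlobalPartitionInformation info = TestUtils.buildPartitionInfo(4);
    List<Partition> task0 = KafkaUtils.calculatePartitionsForTask(info, 2, 0); // partitions 0 and 2
    List<Partition> task1 = KafkaUtils.calculatePartitionsForTask(info, 2, 1); // partitions 1 and 3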

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2f45866c/src/jvm/storm/kafka/StaticCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/StaticCoordinator.java b/src/jvm/storm/kafka/StaticCoordinator.java
index 7415522..040060c 100644
--- a/src/jvm/storm/kafka/StaticCoordinator.java
+++ b/src/jvm/storm/kafka/StaticCoordinator.java
@@ -12,13 +12,10 @@ public class StaticCoordinator implements PartitionCoordinator {
 
     public StaticCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig config, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
         StaticHosts hosts = (StaticHosts) config.hosts;
-        List<Partition> partitions = hosts.getPartitionInformation().getOrderedPartitions();
-        for (int i = taskIndex; i < partitions.size(); i += totalTasks) {
-            Partition myPartition = partitions.get(i);
+        List<Partition> myPartitions = KafkaUtils.calculatePartitionsForTask(hosts.getPartitionInformation(), totalTasks, taskIndex);
+        for (Partition myPartition : myPartitions) {
             _managers.put(myPartition, new PartitionManager(connections, topologyInstanceId, state, stormConf, config, myPartition));
-
         }
-
         _allManagers = new ArrayList(_managers.values());
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2f45866c/src/jvm/storm/kafka/ZkCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/ZkCoordinator.java b/src/jvm/storm/kafka/ZkCoordinator.java
index 98e51a3..35d2c57 100644
--- a/src/jvm/storm/kafka/ZkCoordinator.java
+++ b/src/jvm/storm/kafka/ZkCoordinator.java
@@ -1,6 +1,5 @@
 package storm.kafka;
 
-import backtype.storm.task.IMetricsContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import storm.kafka.trident.GlobalPartitionInformation;
@@ -22,9 +21,12 @@ public class ZkCoordinator implements PartitionCoordinator {
     DynamicBrokersReader _reader;
     ZkState _state;
     Map _stormConf;
-    IMetricsContext _metricsContext;
 
     public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
+        this(connections, stormConf, spoutConfig, state, taskIndex, totalTasks, topologyInstanceId, buildReader(stormConf, spoutConfig));
+    }
+
+    public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId, DynamicBrokersReader reader) {
         _spoutConfig = spoutConfig;
         _connections = connections;
         _taskIndex = taskIndex;
@@ -32,11 +34,14 @@ public class ZkCoordinator implements PartitionCoordinator {
         _topologyInstanceId = topologyInstanceId;
         _stormConf = stormConf;
         _state = state;
-
         ZkHosts brokerConf = (ZkHosts) spoutConfig.hosts;
         _refreshFreqMs = brokerConf.refreshFreqSecs * 1000;
-        _reader = new DynamicBrokersReader(stormConf, brokerConf.brokerZkStr, brokerConf.brokerZkPath, spoutConfig.topic);
+        _reader = reader;
+    }
 
+    private static DynamicBrokersReader buildReader(Map stormConf, SpoutConfig spoutConfig) {
+        ZkHosts hosts = (ZkHosts) spoutConfig.hosts;
+        return new DynamicBrokersReader(stormConf, hosts.brokerZkStr, hosts.brokerZkPath, spoutConfig.topic);
     }
 
     @Override
@@ -50,14 +55,9 @@ public class ZkCoordinator implements PartitionCoordinator {
 
     void refresh() {
         try {
-            LOG.info("Refreshing partition manager connections");
+            LOG.info(taskIdentifier() + "Refreshing partition manager connections");
             GlobalPartitionInformation brokerInfo = _reader.getBrokerInfo();
-            Set<Partition> mine = new HashSet();
-            for (Partition partitionId : brokerInfo) {
-                if (myOwnership(partitionId)) {
-                    mine.add(partitionId);
-                }
-            }
+            List<Partition> mine = KafkaUtils.calculatePartitionsForTask(brokerInfo, _totalTasks, _taskIndex);
 
             Set<Partition> curr = _managers.keySet();
             Set<Partition> newPartitions = new HashSet<Partition>(mine);
@@ -66,13 +66,13 @@ public class ZkCoordinator implements PartitionCoordinator {
             Set<Partition> deletedPartitions = new HashSet<Partition>(curr);
             deletedPartitions.removeAll(mine);
 
-            LOG.info("Deleted partition managers: " + deletedPartitions.toString());
+            LOG.info(taskIdentifier() + "Deleted partition managers: " + deletedPartitions.toString());
 
             for (Partition id : deletedPartitions) {
                 PartitionManager man = _managers.remove(id);
                 man.close();
             }
-            LOG.info("New partition managers: " + newPartitions.toString());
+            LOG.info(taskIdentifier() + "New partition managers: " + newPartitions.toString());
 
             for (Partition id : newPartitions) {
                 PartitionManager man = new PartitionManager(_connections, _topologyInstanceId, _state, _stormConf, _spoutConfig, id);
@@ -83,16 +83,15 @@ public class ZkCoordinator implements PartitionCoordinator {
             throw new RuntimeException(e);
         }
         _cachedList = new ArrayList<PartitionManager>(_managers.values());
-        LOG.info("Finished refreshing");
+        LOG.info(taskIdentifier() + "Finished refreshing");
+    }
+
+    private String taskIdentifier() {
+        return "[" + _taskIndex + "/" + _totalTasks + "] - ";
     }
 
     @Override
     public PartitionManager getManager(Partition partition) {
         return _managers.get(partition);
     }
-
-    private boolean myOwnership(Partition id) {
-        int val = Math.abs(id.host.hashCode() + 23 * id.partition);
-        return val % _totalTasks == _taskIndex;
-    }
 }
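
refresh() now reconciles the managers a task already holds against its freshly calculated assignment with plain set arithmetic. An illustrative walk-through, assuming the Partition(Broker, int) constructor used in this module (broker address and partition ids are placeholders):

    Partition p0 = new Partition(Broker.fromString("broker-0:9092"), 0);
    Partition p1 = new Partition(Broker.fromString("broker-0:9092"), 1);
    Partition p2 = new Partition(Broker.fromString("broker-0:9092"), 2);
    Partition p3 = new Partition(Broker.fromString("broker-0:9092"), 3);
    // The task currently manages {p0, p2}; the fresh calculation yields {p1, p3}.
    Set<Partition> curr = new HashSet<Partition>(Arrays.asList(p0, p2));
    List<Partition> mine = Arrays.asList(p1, p3);
    Set<Partition> newPartitions = new HashSet<Partition>(mine);
    newPartitions.removeAll(curr);                     // {p1, p3}: managers to create
    Set<Partition> deletedPartitions = new HashSet<Partition>(curr);
    deletedPartitions.removeAll(mine);                 // {p0, p2}: managers to close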

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2f45866c/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaUtilsTest.java b/src/test/storm/kafka/KafkaUtilsTest.java
index 0763042..a4e7f52 100644
--- a/src/test/storm/kafka/KafkaUtilsTest.java
+++ b/src/test/storm/kafka/KafkaUtilsTest.java
@@ -178,4 +178,43 @@ public class KafkaUtilsTest {
         Producer<String, String> producer = new Producer<String, String>(producerConfig);
         producer.send(new KeyedMessage<String, String>(config.topic, key, value));
     }
+
+
+    @Test
+    public void assignOnePartitionPerTask() {
+        runPartitionToTaskMappingTest(16, 1);
+    }
+
+    @Test
+    public void assignTwoPartitionsPerTask() {
+        runPartitionToTaskMappingTest(16, 2);
+    }
+
+    @Test
+    public void assignAllPartitionsToOneTask() {
+        runPartitionToTaskMappingTest(32, 32);
+    }
+
+
+    public void runPartitionToTaskMappingTest(int numPartitions, int partitionsPerTask) {
+        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(numPartitions);
+        int numTasks = numPartitions / partitionsPerTask;
+        for (int i = 0; i < numTasks; i++) {
+            assertEquals(partitionsPerTask, KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, numTasks, i).size());
+        }
+    }
+
+    @Test
+    public void moreTasksThanPartitions() {
+        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(1);
+        int numTasks = 2;
+        assertEquals(1, KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, numTasks, 0).size());
+        assertEquals(0, KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, numTasks, 1).size());
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void assignInvalidTask() {
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
+        KafkaUtils.calculatePartitionsForTask(globalPartitionInformation, 1, 1);
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2f45866c/src/test/storm/kafka/TestUtils.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/TestUtils.java b/src/test/storm/kafka/TestUtils.java
new file mode 100644
index 0000000..860d96d
--- /dev/null
+++ b/src/test/storm/kafka/TestUtils.java
@@ -0,0 +1,20 @@
+package storm.kafka;
+
+import storm.kafka.trident.GlobalPartitionInformation;
+
+public class TestUtils {
+
+    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions) {
+        return buildPartitionInfo(numPartitions, 9092);
+    }
+
+
+    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions, int brokerPort) {
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
+        for (int i = 0; i < numPartitions; i++) {
+            globalPartitionInformation.addPartition(i, Broker.fromString("broker-" + i + ":" + brokerPort));
+        }
+        return globalPartitionInformation;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2f45866c/src/test/storm/kafka/ZkCoordinatorTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/ZkCoordinatorTest.java b/src/test/storm/kafka/ZkCoordinatorTest.java
new file mode 100644
index 0000000..35b3b4b
--- /dev/null
+++ b/src/test/storm/kafka/ZkCoordinatorTest.java
@@ -0,0 +1,128 @@
+package storm.kafka;
+
+import backtype.storm.Config;
+import com.netflix.curator.test.TestingServer;
+import kafka.javaapi.consumer.SimpleConsumer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import java.util.*;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.when;
+
+public class ZkCoordinatorTest {
+
+
+    @Mock
+    private DynamicBrokersReader reader;
+
+    @Mock
+    private DynamicPartitionConnections dynamicPartitionConnections;
+
+    private KafkaTestBroker broker = new KafkaTestBroker();
+    private TestingServer server;
+    private Map stormConf = new HashMap();
+    private SpoutConfig spoutConfig;
+    private ZkState state;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+        server = new TestingServer();
+        String connectionString = server.getConnectString();
+        ZkHosts hosts = new ZkHosts(connectionString);
+        hosts.refreshFreqSecs = 1;
+        spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
+        Map conf = buildZookeeperConfig(server);
+        state = new ZkState(conf);
+        SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+        when(dynamicPartitionConnections.register(any(Broker.class), anyInt())).thenReturn(simpleConsumer);
+    }
+
+    private Map buildZookeeperConfig(TestingServer server) {
+        Map conf = new HashMap();
+        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, server.getPort());
+        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("localhost"));
+        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 30);
+        return conf;
+    }
+
+    @After
+    public void shutdown() throws Exception {
+        broker.shutdown();
+        server.stop();
+    }
+
+    @Test
+    public void testOnePartitionPerTask() throws Exception {
+        int totalTasks = 64;
+        int partitionsPerTask = 1;
+        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
+        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfo(totalTasks));
+        for (ZkCoordinator coordinator : coordinatorList) {
+            List<PartitionManager> myManagedPartitions = coordinator.getMyManagedPartitions();
+            assertEquals(partitionsPerTask, myManagedPartitions.size());
+            assertEquals(coordinator._taskIndex, myManagedPartitions.get(0).getPartition().partition);
+        }
+    }
+
+
+    @Test
+    public void testPartitionsChange() throws Exception {
+        final int totalTasks = 64;
+        int partitionsPerTask = 2;
+        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
+        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfo(totalTasks, 9092));
+        List<List<PartitionManager>> partitionManagersBeforeRefresh = getPartitionManagers(coordinatorList);
+        waitForRefresh();
+        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfo(totalTasks, 9093));
+        List<List<PartitionManager>> partitionManagersAfterRefresh = getPartitionManagers(coordinatorList);
+        assertEquals(partitionManagersBeforeRefresh.size(), partitionManagersAfterRefresh.size());
+        Iterator<List<PartitionManager>> iterator = partitionManagersAfterRefresh.iterator();
+        for (List<PartitionManager> partitionManagersBefore : partitionManagersBeforeRefresh) {
+            List<PartitionManager> partitionManagersAfter = iterator.next();
+            assertPartitionsAreDifferent(partitionManagersBefore, partitionManagersAfter, partitionsPerTask);
+        }
+    }
+
+    private void assertPartitionsAreDifferent(List<PartitionManager> partitionManagersBefore, List<PartitionManager> partitionManagersAfter, int partitionsPerTask) {
+        assertEquals(partitionsPerTask, partitionManagersBefore.size());
+        assertEquals(partitionManagersBefore.size(), partitionManagersAfter.size());
+        for (int i = 0; i < partitionsPerTask; i++) {
+            assertNotEquals(partitionManagersBefore.get(i).getPartition(), partitionManagersAfter.get(i).getPartition());
+        }
+
+    }
+
+    private List<List<PartitionManager>> getPartitionManagers(List<ZkCoordinator> coordinatorList) {
+        List<List<PartitionManager>> partitions = new ArrayList();
+        for (ZkCoordinator coordinator : coordinatorList) {
+            partitions.add(coordinator.getMyManagedPartitions());
+        }
+        return partitions;
+    }
+
+    private void waitForRefresh() throws InterruptedException {
+        Thread.sleep(((ZkHosts) spoutConfig.hosts).refreshFreqSecs * 1000 + 1);
+    }
+
+    private List<ZkCoordinator> buildCoordinators(int totalTasks) {
+        List<ZkCoordinator> coordinatorList = new ArrayList<ZkCoordinator>();
+        for (int i = 0; i < totalTasks; i++) {
+            ZkCoordinator coordinator = new ZkCoordinator(dynamicPartitionConnections, stormConf, spoutConfig, state, i, totalTasks, "test-id", reader);
+            coordinatorList.add(coordinator);
+        }
+        return coordinatorList;
+    }
+
+
+}


[49/50] [abbrv] git commit: Merge branch 'storm-kafka-module'

Posted by pt...@apache.org.
Merge branch 'storm-kafka-module'


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/98265c75
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/98265c75
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/98265c75

Branch: refs/heads/master
Commit: 98265c75aae72f535eb2beb9b97b68db44adb0ab
Parents: 6cc9bfe ae728f8
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Mon Apr 21 15:38:02 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Mon Apr 21 15:38:02 2014 -0400

----------------------------------------------------------------------
 external/storm-kafka/CHANGELOG.md               |  13 +
 external/storm-kafka/README.md                  |  25 ++
 external/storm-kafka/pom.xml                    | 138 ++++++++++
 .../storm-kafka/src/jvm/storm/kafka/Broker.java |  80 ++++++
 .../src/jvm/storm/kafka/BrokerHosts.java        |  25 ++
 .../jvm/storm/kafka/DynamicBrokersReader.java   | 145 ++++++++++
 .../kafka/DynamicPartitionConnections.java      |  94 +++++++
 .../jvm/storm/kafka/FailedFetchException.java   |  29 ++
 .../src/jvm/storm/kafka/KafkaConfig.java        |  50 ++++
 .../src/jvm/storm/kafka/KafkaError.java         |  43 +++
 .../src/jvm/storm/kafka/KafkaSpout.java         | 190 +++++++++++++
 .../src/jvm/storm/kafka/KafkaUtils.java         | 235 ++++++++++++++++
 .../src/jvm/storm/kafka/KeyValueScheme.java     |  28 ++
 .../kafka/KeyValueSchemeAsMultiScheme.java      |  36 +++
 .../src/jvm/storm/kafka/Partition.java          |  64 +++++
 .../jvm/storm/kafka/PartitionCoordinator.java   |  26 ++
 .../src/jvm/storm/kafka/PartitionManager.java   | 241 +++++++++++++++++
 .../src/jvm/storm/kafka/SpoutConfig.java        |  36 +++
 .../src/jvm/storm/kafka/StaticCoordinator.java  |  48 ++++
 .../src/jvm/storm/kafka/StaticHosts.java        |  38 +++
 .../storm/kafka/StaticPartitionConnections.java |  52 ++++
 .../jvm/storm/kafka/StringKeyValueScheme.java   |  37 +++
 .../src/jvm/storm/kafka/StringScheme.java       |  46 ++++
 .../src/jvm/storm/kafka/ZkCoordinator.java      | 112 ++++++++
 .../src/jvm/storm/kafka/ZkHosts.java            |  36 +++
 .../src/jvm/storm/kafka/ZkState.java            | 116 ++++++++
 .../src/jvm/storm/kafka/bolt/KafkaBolt.java     |  89 ++++++
 .../jvm/storm/kafka/trident/Coordinator.java    |  50 ++++
 .../storm/kafka/trident/DefaultCoordinator.java |  31 +++
 .../trident/GlobalPartitionInformation.java     |  99 +++++++
 .../storm/kafka/trident/IBatchCoordinator.java  |  26 ++
 .../jvm/storm/kafka/trident/IBrokerReader.java  |  25 ++
 .../src/jvm/storm/kafka/trident/MaxMetric.java  |  40 +++
 .../kafka/trident/OpaqueTridentKafkaSpout.java  |  59 ++++
 .../storm/kafka/trident/StaticBrokerReader.java |  36 +++
 .../trident/TransactionalTridentKafkaSpout.java |  58 ++++
 .../storm/kafka/trident/TridentKafkaConfig.java |  37 +++
 .../kafka/trident/TridentKafkaEmitter.java      | 269 +++++++++++++++++++
 .../jvm/storm/kafka/trident/ZkBrokerReader.java |  62 +++++
 .../storm/kafka/DynamicBrokersReaderTest.java   | 155 +++++++++++
 .../src/test/storm/kafka/KafkaErrorTest.java    |  39 +++
 .../src/test/storm/kafka/KafkaTestBroker.java   |  58 ++++
 .../src/test/storm/kafka/KafkaUtilsTest.java    | 221 +++++++++++++++
 .../storm/kafka/StringKeyValueSchemeTest.java   |  38 +++
 .../src/test/storm/kafka/TestUtils.java         |  20 ++
 .../src/test/storm/kafka/ZkCoordinatorTest.java | 130 +++++++++
 .../test/storm/kafka/bolt/KafkaBoltTest.java    | 171 ++++++++++++
 pom.xml                                         |   1 +
 storm-dist/binary/src/main/assembly/binary.xml  |  19 ++
 49 files changed, 3716 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/98265c75/pom.xml
----------------------------------------------------------------------


[21/50] [abbrv] git commit: Update pom.xml

Posted by pt...@apache.org.
Update pom.xml

Revert custom changes

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/2a0757ac
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/2a0757ac
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/2a0757ac

Branch: refs/heads/master
Commit: 2a0757ace47466e4ce13f670e977128c546c0349
Parents: b6b1f1d
Author: Danijel Schiavuzzi <da...@schiavuzzi.com>
Authored: Mon Feb 17 11:43:20 2014 +0100
Committer: Danijel Schiavuzzi <da...@schiavuzzi.com>
Committed: Mon Feb 17 11:43:20 2014 +0100

----------------------------------------------------------------------
 pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/2a0757ac/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8dbd9a9..910041a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
     <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.4.0-configurable-metrics-emit-interval-SNAPSHOT</version>
+    <version>0.4.0-SNAPSHOT</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
     <licenses>
@@ -170,7 +170,7 @@
         <dependency>
             <groupId>storm</groupId>
             <artifactId>storm</artifactId>
-            <version>0.9.0.1</version>
+            <version>0.9.0</version>
             <scope>provided</scope>
         </dependency>
         <dependency>


[38/50] [abbrv] git commit: updated log messages

Posted by pt...@apache.org.
updated log messages


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/9370c5cc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/9370c5cc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/9370c5cc

Branch: refs/heads/master
Commit: 9370c5ccaab437f2aa1c0f5fad55802aaa3b2b96
Parents: 98cfe93
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Apr 5 13:44:35 2014 +0100
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Apr 5 13:54:39 2014 +0100

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaUtils.java       |  6 +++++-
 src/jvm/storm/kafka/PartitionManager.java | 21 +++++++++------------
 src/jvm/storm/kafka/ZkCoordinator.java    | 14 ++++++--------
 3 files changed, 20 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/9370c5cc/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaUtils.java b/src/jvm/storm/kafka/KafkaUtils.java
index 4e8b3a3..0e7f601 100644
--- a/src/jvm/storm/kafka/KafkaUtils.java
+++ b/src/jvm/storm/kafka/KafkaUtils.java
@@ -204,11 +204,15 @@ public class KafkaUtils {
     }
 
     private static void logPartitionMapping(int totalTasks, int taskIndex, List<Partition> taskPartitions) {
-        String taskPrefix = "[" + taskIndex + "/" + totalTasks + "] --> ";
+        String taskPrefix = taskId(taskIndex, totalTasks);
         if (taskPartitions.isEmpty()) {
             LOG.warn(taskPrefix + "no partitions assigned");
         } else {
             LOG.info(taskPrefix + "assigned " + taskPartitions);
         }
     }
+
+    public static String taskId(int taskIndex, int totalTasks) {
+        return "Task [" + (taskIndex + 1) + "/" + totalTasks + "] ";
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/9370c5cc/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionManager.java b/src/jvm/storm/kafka/PartitionManager.java
index fc9d817..915f0f9 100644
--- a/src/jvm/storm/kafka/PartitionManager.java
+++ b/src/jvm/storm/kafka/PartitionManager.java
@@ -6,7 +6,6 @@ import backtype.storm.metric.api.CountMetric;
 import backtype.storm.metric.api.MeanReducer;
 import backtype.storm.metric.api.ReducedMetric;
 import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.utils.Utils;
 import com.google.common.collect.ImmutableMap;
 import kafka.api.OffsetRequest;
 import kafka.javaapi.consumer.SimpleConsumer;
@@ -64,7 +63,7 @@ public class PartitionManager {
         String path = committedPath();
         try {
             Map<Object, Object> json = _state.readJSON(path);
-            LOG.info("Read partition information from: " + path +  "  --> " + json );
+            LOG.info("Read partition information from: " + path +  " --> " + json );
             if (json != null) {
                 jsonTopologyId = (String) ((Map<Object, Object>) json.get("topology")).get("id");
                 jsonOffset = (Long) json.get("offset");
@@ -84,7 +83,7 @@ public class PartitionManager {
             LOG.info("Read last commit offset from zookeeper: " + _committedTo + "; old topology_id: " + jsonTopologyId + " - new topology_id: " + topologyInstanceId );
         }
 
-        LOG.info("Starting Kafka " + _consumer.host() + ":" + id.partition + " from offset " + _committedTo);
+        LOG.info("Starting " + _partition + " from offset " + _committedTo);
         _emittedToOffset = _committedTo;
 
         _fetchAPILatencyMax = new CombinedMetric(new MaxMetric());
@@ -141,7 +140,7 @@ public class PartitionManager {
         _fetchAPIMessageCount.incrBy(numMessages);
 
         if (numMessages > 0) {
-            LOG.info("Fetched " + numMessages + " messages from Kafka: " + _consumer.host() + ":" + _partition.partition);
+            LOG.info("Fetched " + numMessages + " messages from: " + _partition);
         }
         for (MessageAndOffset msg : msgs) {
             _pending.add(_emittedToOffset);
@@ -149,7 +148,7 @@ public class PartitionManager {
             _emittedToOffset = msg.nextOffset();
         }
         if (numMessages > 0) {
-            LOG.info("Added " + numMessages + " messages from Kafka: " + _consumer.host() + ":" + _partition.partition + " to internal buffers");
+            LOG.info("Added " + numMessages + " messages from: " + _partition + " to internal buffers");
         }
     }
 
@@ -175,7 +174,6 @@ public class PartitionManager {
     }
 
     public void commit() {
-        LOG.info("Committing offset for " + _partition);
         long committedTo;
         if (_pending.isEmpty()) {
             committedTo = _emittedToOffset;
@@ -183,9 +181,8 @@ public class PartitionManager {
             committedTo = _pending.first();
         }
         if (committedTo != _committedTo) {
-            LOG.info("Writing committed offset to ZK: " + committedTo);
-
-            Map<Object, Object> data = (Map<Object, Object>) ImmutableMap.builder()
+            LOG.info("Writing committed offset (" + committedTo + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
+            Map<Object, Object> data = ImmutableMap.builder()
                     .put("topology", ImmutableMap.of("id", _topologyInstanceId,
                             "name", _stormConf.get(Config.TOPOLOGY_NAME)))
                     .put("offset", committedTo)
@@ -194,11 +191,11 @@ public class PartitionManager {
                             "port", _partition.host.port))
                     .put("topic", _spoutConfig.topic).build();
             _state.writeJSON(committedPath(), data);
-
-            LOG.info("Wrote committed offset to ZK: " + committedTo);
             _committedTo = committedTo;
+            LOG.info("Wrote committed offset (" + committedTo + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
+        } else {
+            LOG.info("No new offset for " + _partition + " for topology: " + _topologyInstanceId);
         }
-        LOG.info("Committed offset " + committedTo + " for " + _partition + " for topology: " + _topologyInstanceId);
     }
 
     private String committedPath() {

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/9370c5cc/src/jvm/storm/kafka/ZkCoordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/ZkCoordinator.java b/src/jvm/storm/kafka/ZkCoordinator.java
index 35d2c57..ec35aed 100644
--- a/src/jvm/storm/kafka/ZkCoordinator.java
+++ b/src/jvm/storm/kafka/ZkCoordinator.java
@@ -6,6 +6,8 @@ import storm.kafka.trident.GlobalPartitionInformation;
 
 import java.util.*;
 
+import static storm.kafka.KafkaUtils.taskId;
+
 public class ZkCoordinator implements PartitionCoordinator {
     public static final Logger LOG = LoggerFactory.getLogger(ZkCoordinator.class);
 
@@ -55,7 +57,7 @@ public class ZkCoordinator implements PartitionCoordinator {
 
     void refresh() {
         try {
-            LOG.info(taskIdentifier() + "Refreshing partition manager connections");
+            LOG.info(taskId(_taskIndex, _totalTasks) + "Refreshing partition manager connections");
             GlobalPartitionInformation brokerInfo = _reader.getBrokerInfo();
             List<Partition> mine = KafkaUtils.calculatePartitionsForTask(brokerInfo, _totalTasks, _taskIndex);
 
@@ -66,13 +68,13 @@ public class ZkCoordinator implements PartitionCoordinator {
             Set<Partition> deletedPartitions = new HashSet<Partition>(curr);
             deletedPartitions.removeAll(mine);
 
-            LOG.info(taskIdentifier() + "Deleted partition managers: " + deletedPartitions.toString());
+            LOG.info(taskId(_taskIndex, _totalTasks) + "Deleted partition managers: " + deletedPartitions.toString());
 
             for (Partition id : deletedPartitions) {
                 PartitionManager man = _managers.remove(id);
                 man.close();
             }
-            LOG.info(taskIdentifier() + "New partition managers: " + newPartitions.toString());
+            LOG.info(taskId(_taskIndex, _totalTasks) + "New partition managers: " + newPartitions.toString());
 
             for (Partition id : newPartitions) {
                 PartitionManager man = new PartitionManager(_connections, _topologyInstanceId, _state, _stormConf, _spoutConfig, id);
@@ -83,11 +85,7 @@ public class ZkCoordinator implements PartitionCoordinator {
             throw new RuntimeException(e);
         }
         _cachedList = new ArrayList<PartitionManager>(_managers.values());
-        LOG.info(taskIdentifier() + "Finished refreshing");
-    }
-
-    private String taskIdentifier() {
-        return "[" + _taskIndex + "/" + _totalTasks + "] - ";
+        LOG.info(taskId(_taskIndex, _totalTasks) + "Finished refreshing");
     }
 
     @Override
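
A minimal sketch (not part of the commit) of the prefix the new KafkaUtils.taskId helper produces. The task index is zero-based, so the first of four tasks logs as "Task [1/4] ":

    String prefix = KafkaUtils.taskId(0, 4);          // -> "Task [1/4] "
    LOG.info(prefix + "Refreshing partition manager connections");
    // logged as: Task [1/4] Refreshing partition manager connections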


[46/50] [abbrv] git commit: update README

Posted by pt...@apache.org.
update README


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/3270dd2a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/3270dd2a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/3270dd2a

Branch: refs/heads/master
Commit: 3270dd2a83f5b78d51f23ae1b8b6e3951c60b2c0
Parents: 859a2e8
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Wed Apr 9 11:26:33 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Wed Apr 9 11:26:33 2014 -0400

----------------------------------------------------------------------
 external/storm-kafka/README.md | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/3270dd2a/external/storm-kafka/README.md
----------------------------------------------------------------------
diff --git a/external/storm-kafka/README.md b/external/storm-kafka/README.md
index 874db01..726cff6 100644
--- a/external/storm-kafka/README.md
+++ b/external/storm-kafka/README.md
@@ -1,22 +1,25 @@
-storm-kafka-0.8-plus
+Storm Kafka
 ====================
 
-Port of storm-kafka to support kafka >= 0.8
+Provides core storm and Trident spout implementations for consuming data from Apache Kafka 0.8.x.
 
-##Usage:
-For information on how to use this library in your project see:
 
-[https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus](https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus)
 
+##Usage Example
 
-##Example Topologies:
+```java
+        
+        TridentTopology topology = new TridentTopology();
 
-[https://github.com/wurstmeister/storm-kafka-0.8-plus-test](https://github.com/wurstmeister/storm-kafka-0.8-plus-test)
+        BrokerHosts zk = new ZkHosts("localhost");
 
-##Acknowledgement:
+        TridentKafkaConfig spoutConf = new TridentKafkaConfig(zk, "test-topic");
+        spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
+        OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(spoutConf);
 
-YourKit is kindly supporting this open source project with its full-featured Java Profiler.
-YourKit, LLC is the creator of innovative and intelligent tools for profiling
-Java and .NET applications. Take a look at YourKit's leading software products:
-<a href="http://www.yourkit.com/java/profiler/index.jsp">YourKit Java Profiler</a> and
-<a href="http://www.yourkit.com/.net/profiler/index.jsp">YourKit .NET Profiler</a>.
+
+```
+
+## Committer Sponsors
+
+ * P. Taylor Goetz ([ptgoetz@apache.org](mailto:ptgoetz@apache.org))
\ No newline at end of file
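
The usage example above constructs the spout but stops before wiring it into the topology. A slightly fuller sketch, with the stream name and the newStream call added here for illustration (they are not part of the commit):

    TridentTopology topology = new TridentTopology();

    BrokerHosts zk = new ZkHosts("localhost");
    TridentKafkaConfig spoutConf = new TridentKafkaConfig(zk, "test-topic");
    spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(spoutConf);

    // Attach the spout to a named stream; each()/groupBy()/persistentAggregate()
    // operations would hang off the returned Stream.
    topology.newStream("kafka-spout", spout);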


[02/50] [abbrv] git commit: updated readme

Posted by pt...@apache.org.
updated readme


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/d35a6eee
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/d35a6eee
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/d35a6eee

Branch: refs/heads/master
Commit: d35a6eee117ec401e5fea992200167ead9f764d7
Parents: a440ecc
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Dec 22 14:18:19 2013 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Dec 22 14:18:19 2013 +0000

----------------------------------------------------------------------
 README.md | 8 +++++++-
 pom.xml   | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/d35a6eee/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index fbfcdad..b734b5b 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,10 @@
 storm-kafka-0.8-plus
 ====================
 
-Port of storm-kafka to support kafka >= 0.8
\ No newline at end of file
+Port of storm-kafka to support kafka >= 0.8
+
+##Usage
+For information on how to use this library in your project see:
+
+[https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus](https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus)
+

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/d35a6eee/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 430ecef..8dfd316 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
     <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.2.0</version>
+    <version>0.3.0-SNAPSHOT</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
     <licenses>


[48/50] [abbrv] git commit: add storm-kafka files to the binary distribution

Posted by pt...@apache.org.
add storm-kafka files to the binary distribution


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/ae728f81
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/ae728f81
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/ae728f81

Branch: refs/heads/master
Commit: ae728f81a3f480cc0f02209f5ed1309204a590f0
Parents: 72dbbee
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Mon Apr 21 13:30:41 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Mon Apr 21 13:30:41 2014 -0400

----------------------------------------------------------------------
 storm-dist/binary/src/main/assembly/binary.xml | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/ae728f81/storm-dist/binary/src/main/assembly/binary.xml
----------------------------------------------------------------------
diff --git a/storm-dist/binary/src/main/assembly/binary.xml b/storm-dist/binary/src/main/assembly/binary.xml
index 66b6f49..6e36704 100644
--- a/storm-dist/binary/src/main/assembly/binary.xml
+++ b/storm-dist/binary/src/main/assembly/binary.xml
@@ -60,15 +60,34 @@
             </excludes>
         </fileSet>
 
+        <!-- EXTERNAL -->
+        <fileSet>
+            <directory>${project.basedir}/../../external/storm-kafka/target</directory>
+            <outputDirectory>external/storm-kafka</outputDirectory>
+            <includes>
+                <include>storm*jar</include>
+            </includes>
+        </fileSet>
+        <fileSet>
+            <directory>${project.basedir}/../../external/storm-kafka</directory>
+            <outputDirectory>external/storm-kafka</outputDirectory>
+            <includes>
+                <include>README.*</include>
+            </includes>
+        </fileSet>
+
 
     </fileSets>
 
     <files>
+        <!-- EXAMPLES -->
         <file>
             <source>${project.basedir}/../../examples/storm-starter/target/storm-starter-${project.version}-jar-with-dependencies.jar</source>
             <outputDirectory>/examples/storm-starter/</outputDirectory>
             <destName>storm-starter-topologies-${project.version}.jar</destName>
         </file>
+
+
         <!--
             $STORM_HOME/conf
         -->
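
Under the assembly layout these fileSets describe, the release archive would gain an external/storm-kafka directory holding the built storm-kafka jar (whatever matches storm*jar in the module's target directory) plus its README — a sketch of the resulting tree, inferred from the descriptor rather than from an actual build:

    <release root>/
        external/
            storm-kafka/
                storm-kafka-<version>.jar
                README.md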


[03/50] [abbrv] use consistent formatting

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/jvm/storm/kafka/trident/ZkBrokerReader.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/ZkBrokerReader.java b/src/jvm/storm/kafka/trident/ZkBrokerReader.java
index 0a53137..5e2361d 100644
--- a/src/jvm/storm/kafka/trident/ZkBrokerReader.java
+++ b/src/jvm/storm/kafka/trident/ZkBrokerReader.java
@@ -10,9 +10,9 @@ import java.util.Map;
 
 public class ZkBrokerReader implements IBrokerReader {
 
-	public static final Logger LOG = LoggerFactory.getLogger(ZkBrokerReader.class);
+    public static final Logger LOG = LoggerFactory.getLogger(ZkBrokerReader.class);
 
-	GlobalPartitionInformation cachedBrokers;
+    GlobalPartitionInformation cachedBrokers;
     DynamicBrokersReader reader;
     long lastRefreshTimeMs;
 
@@ -30,16 +30,16 @@ public class ZkBrokerReader implements IBrokerReader {
     @Override
     public GlobalPartitionInformation getCurrentBrokers() {
         long currTime = System.currentTimeMillis();
-        if(currTime > lastRefreshTimeMs + refreshMillis) {
-			LOG.info("brokers need refreshing because " + refreshMillis + "ms have expired");
+        if (currTime > lastRefreshTimeMs + refreshMillis) {
+            LOG.info("brokers need refreshing because " + refreshMillis + "ms have expired");
             cachedBrokers = reader.getBrokerInfo();
             lastRefreshTimeMs = currTime;
-		}
+        }
         return cachedBrokers;
     }
 
     @Override
     public void close() {
         reader.close();
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e8f54d63/src/test/storm/kafka/DynamicBrokersReaderTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/DynamicBrokersReaderTest.java b/src/test/storm/kafka/DynamicBrokersReaderTest.java
index a6c2309..fd90c3c 100644
--- a/src/test/storm/kafka/DynamicBrokersReaderTest.java
+++ b/src/test/storm/kafka/DynamicBrokersReaderTest.java
@@ -21,133 +21,133 @@ import static org.junit.Assert.assertEquals;
  * Time: 20:35
  */
 public class DynamicBrokersReaderTest {
-	private DynamicBrokersReader dynamicBrokersReader;
-	private String masterPath = "/brokers";
-	private String topic = "testing";
-	private CuratorFramework zookeeper;
-	private TestingServer server;
-
-	@Before
-	public void setUp() throws Exception {
-		server = new TestingServer();
-		String connectionString = server.getConnectString();
-		Map conf = new HashMap();
-		conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
-		conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
-		conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
-		ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
-		zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
-		dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
-		zookeeper.start();
-	}
-
-	@After
-	public void tearDown() throws Exception {
-   		server.close();
-	}
-
-	private void addPartition(int id, String host, int port) throws Exception {
-		writePartitionId(id);
-		writeLeader(id, 0);
-		writeLeaderDetails(0, host, port);
-	}
-
-	private void addPartition(int id, int leader, String host, int port) throws Exception {
-		writePartitionId(id);
-		writeLeader(id, leader);
-		writeLeaderDetails(leader, host, port);
-	}
-
-	private void writePartitionId(int id) throws Exception {
-		String path = dynamicBrokersReader.partitionPath();
-		writeDataToPath(path, ("" + id));
-	}
-
-	private void writeDataToPath(String path, String data) throws Exception {
-		ZKPaths.mkdirs(zookeeper.getZookeeperClient().getZooKeeper(), path);
-		zookeeper.setData().forPath(path, data.getBytes());
-	}
-
-	private void writeLeader(int id, int leaderId) throws Exception {
-		String path = dynamicBrokersReader.partitionPath() + "/" + id + "/state";
-		String value = " { \"controller_epoch\":4, \"isr\":[ 1, 0 ], \"leader\":" +leaderId + ", \"leader_epoch\":1, \"version\":1 }";
-		writeDataToPath(path, value);
-	}
-
-	private void writeLeaderDetails(int leaderId, String host, int port) throws Exception{
-		String path = dynamicBrokersReader.brokerPath() + "/" + leaderId;
-		String value = "{ \"host\":\"" + host + "\", \"jmx_port\":9999, \"port\":" + port + ", \"version\":1 }";
-		writeDataToPath(path, value);
-	}
-
-	@Test
-	public void testGetBrokerInfo() throws Exception {
-		String host = "localhost";
-		int port = 9092;
-		int partition = 0;
-		addPartition(partition, host, port);
-		GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
-		assertEquals(1,brokerInfo.getOrderedPartitions().size());
-		assertEquals(port, brokerInfo.getHostFor(partition).port);
-		assertEquals(host, brokerInfo.getHostFor(partition).host);
-	}
-
-
-	@Test
-	public void testMultiplePartitionsOnDifferentHosts() throws Exception {
-		String host = "localhost";
-		int port = 9092;
-		int secondPort = 9093;
-		int partition = 0;
-		int secondPartition = partition + 1;
-		addPartition(partition, 0, host, port);
-		addPartition(secondPartition, 1,  host, secondPort);
-
-		GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
-		assertEquals(2,brokerInfo.getOrderedPartitions().size());
-
-		assertEquals(port, brokerInfo.getHostFor(partition).port);
-		assertEquals(host, brokerInfo.getHostFor(partition).host);
-
-		assertEquals(secondPort, brokerInfo.getHostFor(secondPartition).port);
-		assertEquals(host, brokerInfo.getHostFor(secondPartition).host);
-	}
-
-
-	@Test
-	public void testMultiplePartitionsOnSameHost() throws Exception {
-		String host = "localhost";
-		int port = 9092;
-		int partition = 0;
-		int secondPartition = partition + 1;
-		addPartition(partition, 0, host, port);
-		addPartition(secondPartition, 0,  host, port);
-
-		GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
-		assertEquals(2,brokerInfo.getOrderedPartitions().size());
-
-		assertEquals(port, brokerInfo.getHostFor(partition).port);
-		assertEquals(host, brokerInfo.getHostFor(partition).host);
-
-		assertEquals(port, brokerInfo.getHostFor(secondPartition).port);
-		assertEquals(host, brokerInfo.getHostFor(secondPartition).host);
-	}
-
-	@Test
-	public void testSwitchHostForPartition() throws Exception {
-		String host = "localhost";
-		int port = 9092;
-		int partition = 0;
-		addPartition(partition, host, port);
-		GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
-		assertEquals(port, brokerInfo.getHostFor(partition).port);
-		assertEquals(host, brokerInfo.getHostFor(partition).host);
-
-		String newHost = host + "switch";
-		int newPort = port + 1;
-		addPartition(partition, newHost, newPort);
-		brokerInfo = dynamicBrokersReader.getBrokerInfo();
-		assertEquals(newPort, brokerInfo.getHostFor(partition).port);
-		assertEquals(newHost, brokerInfo.getHostFor(partition).host);
-	}
+    private DynamicBrokersReader dynamicBrokersReader;
+    private String masterPath = "/brokers";
+    private String topic = "testing";
+    private CuratorFramework zookeeper;
+    private TestingServer server;
+
+    @Before
+    public void setUp() throws Exception {
+        server = new TestingServer();
+        String connectionString = server.getConnectString();
+        Map conf = new HashMap();
+        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
+        ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
+        zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
+        dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
+        zookeeper.start();
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        server.close();
+    }
+
+    private void addPartition(int id, String host, int port) throws Exception {
+        writePartitionId(id);
+        writeLeader(id, 0);
+        writeLeaderDetails(0, host, port);
+    }
+
+    private void addPartition(int id, int leader, String host, int port) throws Exception {
+        writePartitionId(id);
+        writeLeader(id, leader);
+        writeLeaderDetails(leader, host, port);
+    }
+
+    private void writePartitionId(int id) throws Exception {
+        String path = dynamicBrokersReader.partitionPath();
+        writeDataToPath(path, ("" + id));
+    }
+
+    private void writeDataToPath(String path, String data) throws Exception {
+        ZKPaths.mkdirs(zookeeper.getZookeeperClient().getZooKeeper(), path);
+        zookeeper.setData().forPath(path, data.getBytes());
+    }
+
+    private void writeLeader(int id, int leaderId) throws Exception {
+        String path = dynamicBrokersReader.partitionPath() + "/" + id + "/state";
+        String value = " { \"controller_epoch\":4, \"isr\":[ 1, 0 ], \"leader\":" + leaderId + ", \"leader_epoch\":1, \"version\":1 }";
+        writeDataToPath(path, value);
+    }
+
+    private void writeLeaderDetails(int leaderId, String host, int port) throws Exception {
+        String path = dynamicBrokersReader.brokerPath() + "/" + leaderId;
+        String value = "{ \"host\":\"" + host + "\", \"jmx_port\":9999, \"port\":" + port + ", \"version\":1 }";
+        writeDataToPath(path, value);
+    }
+
+    @Test
+    public void testGetBrokerInfo() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int partition = 0;
+        addPartition(partition, host, port);
+        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
+        assertEquals(1, brokerInfo.getOrderedPartitions().size());
+        assertEquals(port, brokerInfo.getHostFor(partition).port);
+        assertEquals(host, brokerInfo.getHostFor(partition).host);
+    }
+
+
+    @Test
+    public void testMultiplePartitionsOnDifferentHosts() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int secondPort = 9093;
+        int partition = 0;
+        int secondPartition = partition + 1;
+        addPartition(partition, 0, host, port);
+        addPartition(secondPartition, 1, host, secondPort);
+
+        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
+        assertEquals(2, brokerInfo.getOrderedPartitions().size());
+
+        assertEquals(port, brokerInfo.getHostFor(partition).port);
+        assertEquals(host, brokerInfo.getHostFor(partition).host);
+
+        assertEquals(secondPort, brokerInfo.getHostFor(secondPartition).port);
+        assertEquals(host, brokerInfo.getHostFor(secondPartition).host);
+    }
+
+
+    @Test
+    public void testMultiplePartitionsOnSameHost() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int partition = 0;
+        int secondPartition = partition + 1;
+        addPartition(partition, 0, host, port);
+        addPartition(secondPartition, 0, host, port);
+
+        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
+        assertEquals(2, brokerInfo.getOrderedPartitions().size());
+
+        assertEquals(port, brokerInfo.getHostFor(partition).port);
+        assertEquals(host, brokerInfo.getHostFor(partition).host);
+
+        assertEquals(port, brokerInfo.getHostFor(secondPartition).port);
+        assertEquals(host, brokerInfo.getHostFor(secondPartition).host);
+    }
+
+    @Test
+    public void testSwitchHostForPartition() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int partition = 0;
+        addPartition(partition, host, port);
+        GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
+        assertEquals(port, brokerInfo.getHostFor(partition).port);
+        assertEquals(host, brokerInfo.getHostFor(partition).host);
+
+        String newHost = host + "switch";
+        int newPort = port + 1;
+        addPartition(partition, newHost, newPort);
+        brokerInfo = dynamicBrokersReader.getBrokerInfo();
+        assertEquals(newPort, brokerInfo.getHostFor(partition).port);
+        assertEquals(newHost, brokerInfo.getHostFor(partition).host);
+    }
 }


[37/50] [abbrv] git commit: close resources in tests

Posted by pt...@apache.org.
close resources in tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/98cfe930
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/98cfe930
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/98cfe930

Branch: refs/heads/master
Commit: 98cfe93010ff069af2f7c607b11e6735060e2405
Parents: 93ca654
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Apr 5 13:19:46 2014 +0100
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Apr 5 13:21:37 2014 +0100

----------------------------------------------------------------------
 src/test/storm/kafka/DynamicBrokersReaderTest.java | 2 ++
 src/test/storm/kafka/KafkaTestBroker.java          | 1 +
 src/test/storm/kafka/KafkaUtilsTest.java           | 1 +
 src/test/storm/kafka/ZkCoordinatorTest.java        | 4 +++-
 src/test/storm/kafka/bolt/KafkaBoltTest.java       | 1 +
 5 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/98cfe930/src/test/storm/kafka/DynamicBrokersReaderTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/DynamicBrokersReaderTest.java b/src/test/storm/kafka/DynamicBrokersReaderTest.java
index 47387e3..d03bab3 100644
--- a/src/test/storm/kafka/DynamicBrokersReaderTest.java
+++ b/src/test/storm/kafka/DynamicBrokersReaderTest.java
@@ -43,6 +43,8 @@ public class DynamicBrokersReaderTest {
 
     @After
     public void tearDown() throws Exception {
+        dynamicBrokersReader.close();
+        zookeeper.close();
         server.close();
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/98cfe930/src/test/storm/kafka/KafkaTestBroker.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaTestBroker.java b/src/test/storm/kafka/KafkaTestBroker.java
index 7019c86..d2a44a4 100644
--- a/src/test/storm/kafka/KafkaTestBroker.java
+++ b/src/test/storm/kafka/KafkaTestBroker.java
@@ -48,5 +48,6 @@ public class KafkaTestBroker {
 
     public void shutdown() {
         kafka.shutdown();
+        server.stop();
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/98cfe930/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaUtilsTest.java b/src/test/storm/kafka/KafkaUtilsTest.java
index a4e7f52..25fa618 100644
--- a/src/test/storm/kafka/KafkaUtilsTest.java
+++ b/src/test/storm/kafka/KafkaUtilsTest.java
@@ -42,6 +42,7 @@ public class KafkaUtilsTest {
 
     @After
     public void shutdown() {
+        simpleConsumer.close();
         broker.shutdown();
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/98cfe930/src/test/storm/kafka/ZkCoordinatorTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/ZkCoordinatorTest.java b/src/test/storm/kafka/ZkCoordinatorTest.java
index 35b3b4b..c08ce82 100644
--- a/src/test/storm/kafka/ZkCoordinatorTest.java
+++ b/src/test/storm/kafka/ZkCoordinatorTest.java
@@ -31,6 +31,7 @@ public class ZkCoordinatorTest {
     private Map stormConf = new HashMap();
     private SpoutConfig spoutConfig;
     private ZkState state;
+    private SimpleConsumer simpleConsumer;
 
     @Before
     public void setUp() throws Exception {
@@ -42,7 +43,7 @@ public class ZkCoordinatorTest {
         spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
         Map conf = buildZookeeperConfig(server);
         state = new ZkState(conf);
-        SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
         when(dynamicPartitionConnections.register(any(Broker.class), anyInt())).thenReturn(simpleConsumer);
     }
 
@@ -58,6 +59,7 @@ public class ZkCoordinatorTest {
 
     @After
     public void shutdown() throws Exception {
+        simpleConsumer.close();
         broker.shutdown();
         server.stop();
     }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/98cfe930/src/test/storm/kafka/bolt/KafkaBoltTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/bolt/KafkaBoltTest.java b/src/test/storm/kafka/bolt/KafkaBoltTest.java
index 129b0f6..fa5a104 100644
--- a/src/test/storm/kafka/bolt/KafkaBoltTest.java
+++ b/src/test/storm/kafka/bolt/KafkaBoltTest.java
@@ -53,6 +53,7 @@ public class KafkaBoltTest {
 
     @After
     public void shutdown() {
+        simpleConsumer.close();
         broker.shutdown();
     }
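
Taken together, the diffs apply one pattern: every handle a test opens in setUp is explicitly released in the teardown, in roughly reverse order of creation, so a leaked consumer or ZooKeeper connection cannot bleed into later tests. Mirroring the ZkCoordinatorTest change above:

    @After
    public void shutdown() throws Exception {
        simpleConsumer.close();   // Kafka client first
        broker.shutdown();        // then the embedded broker
        server.stop();            // finally the in-process ZooKeeper server
    }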
 


[35/50] [abbrv] git commit: upgraded storm to 0.9.1

Posted by pt...@apache.org.
upgraded storm to 0.9.1


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/9129536c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/9129536c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/9129536c

Branch: refs/heads/master
Commit: 9129536cd91ea2b54df5ba2a4eb524aa064c56a1
Parents: 2f45866
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Mon Mar 31 22:48:08 2014 +0100
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Mon Mar 31 22:48:08 2014 +0100

----------------------------------------------------------------------
 CHANGELOG.md | 1 +
 pom.xml      | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/9129536c/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 133ffda..f876421 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
 ## 0.5.0
 * fixed partition assignment for KafkaSpout
+* upgraded to storm 0.9.1
 ## 0.4.0
 * added support for reading kafka message keys
 * configurable metrics emit interval

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/9129536c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ff9b9c1..4626fbd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -174,9 +174,9 @@
             </exclusions>
         </dependency>
         <dependency>
-            <groupId>storm</groupId>
-            <artifactId>storm</artifactId>
-            <version>0.9.0</version>
+            <groupId>org.apache.storm</groupId>
+            <artifactId>storm-core</artifactId>
+            <version>0.9.1-incubating</version>
             <scope>provided</scope>
         </dependency>
         <dependency>


[22/50] [abbrv] git commit: Merge pull request #22 from dschiavu/configurable-metrics-emit-interval

Posted by pt...@apache.org.
Merge pull request #22 from dschiavu/configurable-metrics-emit-interval

Make the spout's Storm metrics time bucket size configurable via TridentKafkaConfig. Leave the default time bucket size at 60 seconds.

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/71119ce1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/71119ce1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/71119ce1

Branch: refs/heads/master
Commit: 71119ce131a1c618759231ed9d4aca0468ff4055
Parents: 312408a 2a0757a
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Wed Feb 19 22:48:37 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Wed Feb 19 22:48:37 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaConfig.java                 | 1 +
 src/jvm/storm/kafka/trident/TridentKafkaEmitter.java | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
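
The diff body is not reproduced in this digest, so the exact field is an assumption: in later storm-kafka sources the knob this merge adds appears on KafkaConfig as metricsTimeBucketSizeInSecs, defaulting to 60. A hedged usage sketch:

    TridentKafkaConfig spoutConf = new TridentKafkaConfig(new ZkHosts("localhost"), "test-topic");
    // Field name assumed (not shown in this digest); default remains 60 seconds.
    spoutConf.metricsTimeBucketSizeInSecs = 30;  // emit spout metrics every 30s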



[33/50] [abbrv] git commit: align with 0.9.2-incubating dependencies.

Posted by pt...@apache.org.
align with 0.9.2-incubating dependencies.


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/84f04c3f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/84f04c3f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/84f04c3f

Branch: refs/heads/master
Commit: 84f04c3fea11ccb6844bdef7baf8e6f654637981
Parents: e1b6fb4
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Tue Mar 18 15:36:09 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Tue Mar 18 15:36:09 2014 -0400

----------------------------------------------------------------------
 pom.xml                                       | 117 ++-------------------
 src/jvm/storm/kafka/DynamicBrokersReader.java |  18 ++--
 2 files changed, 17 insertions(+), 118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/84f04c3f/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 4626fbd..e6adc5d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2,26 +2,12 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-    <groupId>net.wurstmeister.storm</groupId>
-    <artifactId>storm-kafka-0.8-plus</artifactId>
+    <groupId>org.apache.storm</groupId>
+    <artifactId>storm-kafka</artifactId>
     <packaging>jar</packaging>
-    <version>0.5.0-SNAPSHOT</version>
-    <name>storm-kafka-0.8-plus</name>
-    <description>Storm module for kafka &gt; 0.8</description>
-    <licenses>
-        <license>
-            <name>The Apache Software License, Version 2.0</name>
-            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
-            <distribution>repo</distribution>
-            <comments>A business-friendly OSS license</comments>
-        </license>
-    </licenses>
-    <url>https://github.com/wurstmeister/storm-kafka-0.8-plus</url>
-    <scm>
-        <connection>scm:git:git://github.com/wurstmeister/storm-kafka-0.8-plus.git</connection>
-        <developerConnection>scm:git:ssh://git@github.com/wurstmeister/storm-kafka-0.8-plus.git</developerConnection>
-        <url>https://github.com/wurstmeister/storm-kafka-0.8-plus</url>
-    </scm>
+    <version>0.9.2-incubating-SNAPSHOT</version>
+    <name>storm-kafka</name>
+    <description>Storm Spouts for Apache Kafka</description>
     <properties>
         <scalaVersion>2.9.2</scalaVersion>
         <kafkaArtifact>kafka_2.9.2</kafkaArtifact>
@@ -29,74 +15,11 @@
     </properties>
     <build>
         <plugins>
-            <plugin>
-                <artifactId>maven-jar-plugin</artifactId>
-                <version>2.4</version>
-                <configuration>
-                    <classifier>${envClassifier}</classifier>
-                </configuration>
-            </plugin>
+
         </plugins>
         <sourceDirectory>src/jvm</sourceDirectory>
         <testSourceDirectory>src/test</testSourceDirectory>
-        <resources>
-            <resource>
-                <directory>resources</directory>
-            </resource>
-        </resources>
-        <testResources>
-            <testResource>
-                <directory>dev-resources</directory>
-            </testResource>
-            <testResource>
-                <directory>resources</directory>
-            </testResource>
-        </testResources>
-        <directory>target</directory>
-        <outputDirectory>target/classes</outputDirectory>
     </build>
-    <repositories>
-        <repository>
-            <id>central</id>
-            <url>http://repo1.maven.org/maven2/</url>
-            <snapshots>
-                <enabled>false</enabled>
-            </snapshots>
-            <releases>
-                <enabled>true</enabled>
-            </releases>
-        </repository>
-        <repository>
-            <id>clojars</id>
-            <url>https://clojars.org/repo/</url>
-            <snapshots>
-                <enabled>true</enabled>
-            </snapshots>
-            <releases>
-                <enabled>true</enabled>
-            </releases>
-        </repository>
-        <repository>
-            <id>scala-tools</id>
-            <url>http://scala-tools.org/repo-releases</url>
-            <snapshots>
-                <enabled>true</enabled>
-            </snapshots>
-            <releases>
-                <enabled>true</enabled>
-            </releases>
-        </repository>
-        <repository>
-            <id>conjars</id>
-            <url>http://conjars.org/repo/</url>
-            <snapshots>
-                <enabled>true</enabled>
-            </snapshots>
-            <releases>
-                <enabled>true</enabled>
-            </releases>
-        </repository>
-    </repositories>
     <dependencies>
         <dependency>
             <groupId>org.mockito</groupId>
@@ -118,7 +41,7 @@
         <dependency>
             <groupId>com.netflix.curator</groupId>
             <artifactId>curator-framework</artifactId>
-            <version>1.0.1</version>
+            <version>1.3.3</version>
             <exclusions>
                 <exclusion>
                     <groupId>log4j</groupId>
@@ -133,7 +56,7 @@
         <dependency>
             <groupId>com.netflix.curator</groupId>
             <artifactId>curator-recipes</artifactId>
-            <version>1.0.1</version>
+            <version>1.3.3</version>
             <exclusions>
                 <exclusion>
                     <groupId>log4j</groupId>
@@ -145,7 +68,7 @@
         <dependency>
             <groupId>com.netflix.curator</groupId>
             <artifactId>curator-test</artifactId>
-            <version>1.0.1</version>
+            <version>1.3.3</version>
             <exclusions>
                 <exclusion>
                     <groupId>log4j</groupId>
@@ -176,30 +99,10 @@
         <dependency>
             <groupId>org.apache.storm</groupId>
             <artifactId>storm-core</artifactId>
-            <version>0.9.1-incubating</version>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>log4j-over-slf4j</artifactId>
-            <version>1.6.6</version>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.clojure</groupId>
-            <artifactId>clojure</artifactId>
-            <version>1.4.0</version>
+            <version>0.9.2-incubating-SNAPSHOT</version>
             <scope>provided</scope>
         </dependency>
     </dependencies>
-    <distributionManagement>
-        <repository>
-            <id>clojars</id>
-            <name>Clojars repository</name>
-            <url>https://clojars.org/repo</url>
-        </repository>
-    </distributionManagement>
-
     <profiles>
         <profile>
             <id>Scala-2.10</id>

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/84f04c3f/src/jvm/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/DynamicBrokersReader.java b/src/jvm/storm/kafka/DynamicBrokersReader.java
index 5b1d750..cd751fe 100644
--- a/src/jvm/storm/kafka/DynamicBrokersReader.java
+++ b/src/jvm/storm/kafka/DynamicBrokersReader.java
@@ -26,17 +26,13 @@ public class DynamicBrokersReader {
     public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
         _zkPath = zkPath;
         _topic = topic;
-        try {
-            _curator = CuratorFrameworkFactory.newClient(
-                    zkStr,
-                    Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
-                    15000,
-                    new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
-                            Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
-            _curator.start();
-        } catch (IOException ex) {
-            LOG.error("can't connect to zookeeper");
-        }
+        _curator = CuratorFrameworkFactory.newClient(
+                zkStr,
+                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
+                15000,
+                new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
+                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
+        _curator.start();
     }
 
     /**


[07/50] [abbrv] git commit: Use partition id in zk path

Posted by pt...@apache.org.
Use partition id in zk path

* added topology id to logs


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/b5de86ed
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/b5de86ed
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/b5de86ed

Branch: refs/heads/master
Commit: b5de86ed7f966018454fba667a42010f0f73f490
Parents: 735b87f
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Jan 5 12:17:55 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Jan 5 12:17:55 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/Partition.java        |  2 +-
 src/jvm/storm/kafka/PartitionManager.java | 12 +++++++-----
 2 files changed, 8 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/b5de86ed/src/jvm/storm/kafka/Partition.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/Partition.java b/src/jvm/storm/kafka/Partition.java
index bbb4fbb..96a3ad7 100644
--- a/src/jvm/storm/kafka/Partition.java
+++ b/src/jvm/storm/kafka/Partition.java
@@ -41,7 +41,7 @@ public class Partition implements ISpoutPartition {
 
     @Override
     public String getId() {
-        return toString();
+        return "partition_" + partition;
     }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/b5de86ed/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionManager.java b/src/jvm/storm/kafka/PartitionManager.java
index 623bc10..e3e31db 100644
--- a/src/jvm/storm/kafka/PartitionManager.java
+++ b/src/jvm/storm/kafka/PartitionManager.java
@@ -63,14 +63,16 @@ public class PartitionManager {
 
         String jsonTopologyId = null;
         Long jsonOffset = null;
+        String path = committedPath();
         try {
-            Map<Object, Object> json = _state.readJSON(committedPath());
+            Map<Object, Object> json = _state.readJSON(path);
+            LOG.info("Read partition information from: " + path +  "  --> " + json );
             if (json != null) {
                 jsonTopologyId = (String) ((Map<Object, Object>) json.get("topology")).get("id");
                 jsonOffset = (Long) json.get("offset");
             }
         } catch (Throwable e) {
-            LOG.warn("Error reading and/or parsing at ZkNode: " + committedPath(), e);
+            LOG.warn("Error reading and/or parsing at ZkNode: " + path, e);
         }
 
         if (!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.forceFromStart) {
@@ -81,7 +83,7 @@ public class PartitionManager {
             LOG.info("Setting last commit offset to HEAD.");
         } else {
             _committedTo = jsonOffset;
-            LOG.info("Read last commit offset from zookeeper: " + _committedTo);
+            LOG.info("Read last commit offset from zookeeper: " + _committedTo + "; old topology_id: " + jsonTopologyId + " - new topology_id: " + topologyInstanceId );
         }
 
         LOG.info("Starting Kafka " + _consumer.host() + ":" + id.partition + " from offset " + _committedTo);
@@ -205,11 +207,11 @@ public class PartitionManager {
             LOG.info("Wrote committed offset to ZK: " + committedTo);
             _committedTo = committedTo;
         }
-        LOG.info("Committed offset " + committedTo + " for " + _partition);
+        LOG.info("Committed offset " + committedTo + " for " + _partition + " for topology: " + _topologyInstanceId);
     }
 
     private String committedPath() {
-        return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" + _partition;
+        return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" + _partition.getId();
     }
 
     public long queryPartitionOffsetLatestTime() {
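
With Partition.getId() now returning "partition_" + partition, the committed-offset node is keyed by partition number alone rather than by Partition.toString(), which also encoded the broker host — presumably so the path no longer changes when a partition's leader moves to another broker. For a spout configured with zkRoot "/kafkastorm" and id "my-spout" (illustrative values), offsets for partition 0 would live at:

    /kafkastorm/my-spout/partition_0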


[32/50] [abbrv] git commit: Merge branch 'master' of github.com:wurstmeister/storm-kafka-0.8-plus

Posted by pt...@apache.org.
Merge branch 'master' of github.com:wurstmeister/storm-kafka-0.8-plus


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/e1b6fb4d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/e1b6fb4d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/e1b6fb4d

Branch: refs/heads/master
Commit: e1b6fb4db4554ba4d8cacc1d1fcdea9eabfcaa17
Parents: c62d2e1 09ae973
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Tue Mar 18 12:35:34 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Tue Mar 18 12:35:34 2014 -0400

----------------------------------------------------------------------
 CHANGELOG.md                                 |   4 +
 pom.xml                                      |   8 +-
 src/jvm/storm/kafka/KafkaUtils.java          |  11 +-
 src/jvm/storm/kafka/bolt/KafkaBolt.java      |  72 +++++++++
 src/test/storm/kafka/bolt/KafkaBoltTest.java | 170 ++++++++++++++++++++++
 5 files changed, 261 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e1b6fb4d/pom.xml
----------------------------------------------------------------------


[41/50] [abbrv] git commit: Merge branch 'master' of /Users/tgoetz/Projects/apache/../other/storm-kafka-0.8-plus

Posted by pt...@apache.org.
Merge branch 'master' of /Users/tgoetz/Projects/apache/../other/storm-kafka-0.8-plus


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/8fafbad9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/8fafbad9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/8fafbad9

Branch: refs/heads/master
Commit: 8fafbad96bce19a3f11add249d613f9009f9cf17
Parents: 84f04c3 adab172
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Wed Apr 9 09:33:50 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Wed Apr 9 09:33:50 2014 -0400

----------------------------------------------------------------------
 CHANGELOG.md                                    |   3 +
 README.md                                       |  10 +-
 src/jvm/storm/kafka/KafkaUtils.java             |  35 ++++-
 src/jvm/storm/kafka/PartitionManager.java       |  34 ++---
 src/jvm/storm/kafka/StaticCoordinator.java      |   7 +-
 src/jvm/storm/kafka/ZkCoordinator.java          |  35 +++--
 .../storm/kafka/DynamicBrokersReaderTest.java   |   2 +
 src/test/storm/kafka/KafkaTestBroker.java       |   1 +
 src/test/storm/kafka/KafkaUtilsTest.java        |  40 ++++++
 src/test/storm/kafka/TestUtils.java             |  20 +++
 src/test/storm/kafka/ZkCoordinatorTest.java     | 130 +++++++++++++++++++
 src/test/storm/kafka/bolt/KafkaBoltTest.java    |   1 +
 12 files changed, 270 insertions(+), 48 deletions(-)
----------------------------------------------------------------------



[24/50] [abbrv] git commit: Added support for reading message key

Posted by pt...@apache.org.
Added support for reading message key


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/4de85c8e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/4de85c8e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/4de85c8e

Branch: refs/heads/master
Commit: 4de85c8e7f6f4244be9eb688c6a4576a9c88c2a1
Parents: f8afa99
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Feb 23 12:48:56 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Feb 23 12:48:56 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaUtils.java             | 22 ++++--
 src/jvm/storm/kafka/KeyValueScheme.java         | 11 +++
 .../kafka/KeyValueSchemeAsMultiScheme.java      | 19 +++++
 src/jvm/storm/kafka/PartitionManager.java       |  2 +-
 src/jvm/storm/kafka/StringKeyValueScheme.java   | 20 ++++++
 src/jvm/storm/kafka/StringScheme.java           | 10 ++-
 .../kafka/trident/TridentKafkaEmitter.java      |  3 +-
 src/test/storm/kafka/KafkaUtilsTest.java        | 76 ++++++++++++++++++--
 .../storm/kafka/StringKeyValueSchemeTest.java   | 38 ++++++++++
 9 files changed, 187 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaUtils.java b/src/jvm/storm/kafka/KafkaUtils.java
index d86519c..300d998 100644
--- a/src/jvm/storm/kafka/KafkaUtils.java
+++ b/src/jvm/storm/kafka/KafkaUtils.java
@@ -1,6 +1,7 @@
 package storm.kafka;
 
 import backtype.storm.metric.api.IMetric;
+import backtype.storm.utils.Utils;
 import kafka.api.FetchRequest;
 import kafka.api.FetchRequestBuilder;
 import kafka.api.PartitionOffsetRequestInfo;
@@ -9,6 +10,7 @@ import kafka.javaapi.FetchResponse;
 import kafka.javaapi.OffsetRequest;
 import kafka.javaapi.consumer.SimpleConsumer;
 import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.Message;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import storm.kafka.trident.IBrokerReader;
@@ -16,10 +18,8 @@ import storm.kafka.trident.StaticBrokerReader;
 import storm.kafka.trident.ZkBrokerReader;
 
 import java.net.ConnectException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
+import java.nio.ByteBuffer;
+import java.util.*;
 
 
 public class KafkaUtils {
@@ -165,4 +165,18 @@ public class KafkaUtils {
         }
         return msgs;
     }
+
+
+    public static Iterable<List<Object>> generateTuples(KafkaConfig kafkaConfig, Message msg) {
+        Iterable<List<Object>> tups;
+        ByteBuffer payload = msg.payload();
+        ByteBuffer key = msg.key();
+        if (key != null && kafkaConfig.scheme instanceof KeyValueSchemeAsMultiScheme) {
+            tups = ((KeyValueSchemeAsMultiScheme) kafkaConfig.scheme).deserializeKeyAndValue(Utils.toByteArray(key), Utils.toByteArray(payload));
+        } else {
+            tups = kafkaConfig.scheme.deserialize(Utils.toByteArray(payload));
+        }
+        return tups;
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/jvm/storm/kafka/KeyValueScheme.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KeyValueScheme.java b/src/jvm/storm/kafka/KeyValueScheme.java
new file mode 100644
index 0000000..df31cb8
--- /dev/null
+++ b/src/jvm/storm/kafka/KeyValueScheme.java
@@ -0,0 +1,11 @@
+package storm.kafka;
+
+import backtype.storm.spout.Scheme;
+
+import java.util.List;
+
+public interface KeyValueScheme extends Scheme {
+
+    public List<Object> deserializeKeyAndValue(byte[] key, byte[] value);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java b/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
new file mode 100644
index 0000000..2412a1c
--- /dev/null
+++ b/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
@@ -0,0 +1,19 @@
+package storm.kafka;
+
+import backtype.storm.spout.SchemeAsMultiScheme;
+import java.util.Arrays;
+import java.util.List;
+
+public class KeyValueSchemeAsMultiScheme extends SchemeAsMultiScheme{
+
+    public KeyValueSchemeAsMultiScheme(KeyValueScheme scheme) {
+        super(scheme);
+    }
+
+    public Iterable<List<Object>> deserializeKeyAndValue(final byte[] key, final byte[] value) {
+        List<Object> o = ((KeyValueScheme)scheme).deserializeKeyAndValue(key, value);
+        if(o == null) return null;
+        else return Arrays.asList(o);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionManager.java b/src/jvm/storm/kafka/PartitionManager.java
index f12c0d9..fc9d817 100644
--- a/src/jvm/storm/kafka/PartitionManager.java
+++ b/src/jvm/storm/kafka/PartitionManager.java
@@ -112,7 +112,7 @@ public class PartitionManager {
             if (toEmit == null) {
                 return EmitState.NO_EMITTED;
             }
-            Iterable<List<Object>> tups = _spoutConfig.scheme.deserialize(Utils.toByteArray(toEmit.msg.payload()));
+            Iterable<List<Object>> tups = KafkaUtils.generateTuples(_spoutConfig, toEmit.msg);
             if (tups != null) {
                 for (List<Object> tup : tups) {
                     collector.emit(tup, new KafkaMessageId(_partition, toEmit.offset));

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/jvm/storm/kafka/StringKeyValueScheme.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/StringKeyValueScheme.java b/src/jvm/storm/kafka/StringKeyValueScheme.java
new file mode 100644
index 0000000..a6adddb
--- /dev/null
+++ b/src/jvm/storm/kafka/StringKeyValueScheme.java
@@ -0,0 +1,20 @@
+package storm.kafka;
+
+import backtype.storm.tuple.Values;
+import com.google.common.collect.ImmutableMap;
+
+import java.util.List;
+
+public class StringKeyValueScheme extends StringScheme implements KeyValueScheme {
+
+    @Override
+    public List<Object> deserializeKeyAndValue(byte[] key, byte[] value) {
+        if ( key == null ) {
+            return deserialize(value);
+        }
+        String keyString = StringScheme.deserializeString(key);
+        String valueString = StringScheme.deserializeString(value);
+        return new Values(ImmutableMap.of(keyString, valueString));
+    }
+
+}
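
For orientation, a minimal sketch of plugging the scheme above into a Kafka spout. This is a hedged example, not code from this commit: the ZooKeeper address, topic, zk root, and spout id are placeholders, and the SpoutConfig/ZkHosts signatures are assumed from this module's public API.

    import backtype.storm.topology.TopologyBuilder;
    import storm.kafka.*;

    public class KeyValueSpoutExample {
        public static void main(String[] args) {
            // placeholder ZooKeeper connection string
            BrokerHosts hosts = new ZkHosts("localhost:2181");
            // placeholder topic, zk root, and consumer id
            SpoutConfig spoutConfig = new SpoutConfig(hosts, "clicks", "/kafkastorm", "kv-spout");
            // emits one field ("str") holding either the plain value string
            // (for a keyless message) or an ImmutableMap of key -> value
            spoutConfig.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());

            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("kafka-spout", new KafkaSpout(spoutConfig), 1);
        }
    }
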

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/jvm/storm/kafka/StringScheme.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/StringScheme.java b/src/jvm/storm/kafka/StringScheme.java
index c4d8270..a809448 100644
--- a/src/jvm/storm/kafka/StringScheme.java
+++ b/src/jvm/storm/kafka/StringScheme.java
@@ -9,15 +9,21 @@ import java.util.List;
 
 public class StringScheme implements Scheme {
 
+    public static final String STRING_SCHEME_KEY = "str";
+
     public List<Object> deserialize(byte[] bytes) {
+        return new Values(deserializeString(bytes));
+    }
+
+    public static String deserializeString(byte[] string) {
         try {
-            return new Values(new String(bytes, "UTF-8"));
+            return new String(string, "UTF-8");
         } catch (UnsupportedEncodingException e) {
             throw new RuntimeException(e);
         }
     }
 
     public Fields getOutputFields() {
-        return new Fields("str");
+        return new Fields(STRING_SCHEME_KEY);
     }
 }
\ No newline at end of file
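
With the output field name now exposed as a constant, downstream bolts can reference it symbolically instead of hard-coding "str". A hedged sketch of a consumer bolt (the bolt itself is illustrative, not part of this commit):

    import backtype.storm.topology.BasicOutputCollector;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseBasicBolt;
    import backtype.storm.tuple.Tuple;
    import storm.kafka.StringScheme;

    // illustrative terminal bolt: prints each message decoded by StringScheme
    public class PrintBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple tuple, BasicOutputCollector collector) {
            System.out.println(tuple.getStringByField(StringScheme.STRING_SCHEME_KEY));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // terminal bolt; emits nothing
        }
    }
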

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
index 66785f0..973ce8f 100644
--- a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
+++ b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
@@ -155,8 +155,7 @@ public class TridentKafkaEmitter {
     }
 
     private void emit(TridentCollector collector, Message msg) {
-        Iterable<List<Object>> values =
-                _config.scheme.deserialize(Utils.toByteArray(msg.payload()));
+        Iterable<List<Object>> values = KafkaUtils.generateTuples(_config, msg);
         if (values != null) {
             for (List<Object> value : values) {
                 collector.emit(value);

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaUtilsTest.java b/src/test/storm/kafka/KafkaUtilsTest.java
index 20a4221..db270c2 100644
--- a/src/test/storm/kafka/KafkaUtilsTest.java
+++ b/src/test/storm/kafka/KafkaUtilsTest.java
@@ -1,10 +1,13 @@
 package storm.kafka;
 
+import backtype.storm.spout.SchemeAsMultiScheme;
 import backtype.storm.utils.Utils;
+import com.google.common.collect.ImmutableMap;
 import kafka.api.OffsetRequest;
 import kafka.javaapi.consumer.SimpleConsumer;
 import kafka.javaapi.message.ByteBufferMessageSet;
 import kafka.javaapi.producer.Producer;
+import kafka.message.MessageAndOffset;
 import kafka.producer.KeyedMessage;
 import kafka.producer.ProducerConfig;
 import org.junit.After;
@@ -12,10 +15,12 @@ import org.junit.Before;
 import org.junit.Test;
 import storm.kafka.trident.GlobalPartitionInformation;
 
+import java.util.List;
 import java.util.Properties;
 
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 public class KafkaUtilsTest {
@@ -90,19 +95,80 @@ public class KafkaUtilsTest {
         assertThat(earliestOffset, is(equalTo(offsetFromConfig)));
     }
 
-    private String createTopicAndSendMessage() {
+    @Test
+    public void generateTuplesWithoutKeyAndKeyValueScheme() {
+        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
+        runGetValueOnlyTuplesTest();
+    }
+
+    @Test
+    public void generateTuplesWithKeyAndKeyValueScheme() {
+        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
+        String value = "value";
+        String key = "key";
+        createTopicAndSendMessage(key, value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
+            assertEquals(ImmutableMap.of(key, value), lists.iterator().next().get(0));
+        }
+    }
+
+    @Test
+    public void generateTuplesWithValueScheme() {
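PLACEHOLDER-UNUSED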
+        config.scheme = new SchemeAsMultiScheme(new StringScheme());
+        runGetValueOnlyTuplesTest();
+    }
+
+    @Test
+    public void generateTuplesWithValueSchemeAndKeyValueMessage() {
+        config.scheme = new SchemeAsMultiScheme(new StringScheme());
+        String value = "value";
+        String key = "key";
+        createTopicAndSendMessage(key, value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
+            assertEquals(value, lists.iterator().next().get(0));
+        }
+    }
+
+    private ByteBufferMessageSet getLastMessage() {
+        long offsetOfLastMessage = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
+        return KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offsetOfLastMessage);
+    }
+
+    private void runGetValueOnlyTuplesTest() {
+        String value = "value";
+        createTopicAndSendMessage(null, value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
+            assertEquals(value, lists.iterator().next().get(0));
+        }
+    }
+
+
+    private void createTopicAndSendMessage() {
+        createTopicAndSendMessage(null, "someValue");
+    }
+
+    private void createTopicAndSendMessage(String value) {
+        createTopicAndSendMessage(null, value);
+    }
+
+    private void createTopicAndSendMessage(String key, String value) {
         Properties p = new Properties();
         p.setProperty("metadata.broker.list", "localhost:49123");
         p.setProperty("serializer.class", "kafka.serializer.StringEncoder");
         ProducerConfig producerConfig = new ProducerConfig(p);
         Producer<String, String> producer = new Producer<String, String>(producerConfig);
-        String value = "value";
-        producer.send(new KeyedMessage<String, String>(config.topic, value));
-        return value;
+        producer.send(new KeyedMessage<String, String>(config.topic, key, value));
     }
 
     private void sendMessageAndAssertValueForOffset(long offset) {
-        String value = createTopicAndSendMessage();
+        String value = "test";
+        createTopicAndSendMessage(value);
         ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
         String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
         assertThat(message, is(equalTo(value)));

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4de85c8e/src/test/storm/kafka/StringKeyValueSchemeTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/StringKeyValueSchemeTest.java b/src/test/storm/kafka/StringKeyValueSchemeTest.java
new file mode 100644
index 0000000..4413c7b
--- /dev/null
+++ b/src/test/storm/kafka/StringKeyValueSchemeTest.java
@@ -0,0 +1,38 @@
+package storm.kafka;
+
+import backtype.storm.tuple.Fields;
+import com.google.common.collect.ImmutableMap;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class StringKeyValueSchemeTest {
+
+    private StringKeyValueScheme scheme = new StringKeyValueScheme();
+
+    @Test
+    public void testDeserialize() throws Exception {
+        assertEquals(Arrays.asList("test"), scheme.deserialize("test".getBytes()));
+    }
+
+    @Test
+    public void testGetOutputFields() throws Exception {
+        Fields outputFields = scheme.getOutputFields();
+        assertTrue(outputFields.contains(StringScheme.STRING_SCHEME_KEY));
+        assertEquals(1, outputFields.size());
+    }
+
+    @Test
+    public void testDeserializeWithNullKeyAndValue() throws Exception {
+        assertEquals(Arrays.asList("test"), scheme.deserializeKeyAndValue(null, "test".getBytes()));
+    }
+
+    @Test
+    public void testDeserializeWithKeyAndValue() throws Exception {
+        assertEquals(Arrays.asList(ImmutableMap.of("key", "test")),
+                scheme.deserializeKeyAndValue("key".getBytes(), "test".getBytes()));
+    }
+}


[28/50] [abbrv] git commit: added Kafka bolt

Posted by pt...@apache.org.
added Kafka bolt


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/118cec43
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/118cec43
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/118cec43

Branch: refs/heads/master
Commit: 118cec43b19eec2ed1d66f6dd0aac7209edfde96
Parents: f573001
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Wed Feb 26 22:38:21 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Thu Feb 27 22:19:24 2014 +0000

----------------------------------------------------------------------
 pom.xml                                      |   8 +-
 src/jvm/storm/kafka/bolt/KafkaBolt.java      |  72 +++++++++
 src/test/storm/kafka/bolt/KafkaBoltTest.java | 170 ++++++++++++++++++++++
 3 files changed, 249 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/118cec43/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index c542158..ff9b9c1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
     <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.4.0</version>
+    <version>0.5.0-SNAPSHOT</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
     <licenses>
@@ -99,6 +99,12 @@
     </repositories>
     <dependencies>
         <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-all</artifactId>
+            <version>1.9.0</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
             <version>${scalaVersion}</version>

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/118cec43/src/jvm/storm/kafka/bolt/KafkaBolt.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/bolt/KafkaBolt.java b/src/jvm/storm/kafka/bolt/KafkaBolt.java
new file mode 100644
index 0000000..89969d9
--- /dev/null
+++ b/src/jvm/storm/kafka/bolt/KafkaBolt.java
@@ -0,0 +1,72 @@
+package storm.kafka.bolt;
+
+import backtype.storm.task.OutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.topology.OutputFieldsDeclarer;
+import backtype.storm.topology.base.BaseRichBolt;
+import backtype.storm.tuple.Tuple;
+import kafka.javaapi.producer.Producer;
+import kafka.producer.KeyedMessage;
+import kafka.producer.ProducerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.Properties;
+
+
+/**
+ * Bolt implementation that can send Tuple data to Kafka
+ * <p/>
+ * It expects the producer configuration and topic in storm config under
+ * <p/>
+ * 'kafka.broker.properties' and 'topic'
+ * <p/>
+ * respectively.
+ */
+public class KafkaBolt<K, V> extends BaseRichBolt {
+
+    private static final Logger LOG = LoggerFactory.getLogger(KafkaBolt.class);
+
+    public static final String TOPIC = "topic";
+    public static final String KAFKA_BROKER_PROPERTIES = "kafka.broker.properties";
+
+    public static final String BOLT_KEY = "key";
+    public static final String BOLT_MESSAGE = "message";
+
+    private Producer<K, V> producer;
+    private OutputCollector collector;
+    private String topic;
+
+    @Override
+    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+        Map configMap = (Map) stormConf.get(KAFKA_BROKER_PROPERTIES);
+        Properties properties = new Properties();
+        properties.putAll(configMap);
+        ProducerConfig config = new ProducerConfig(properties);
+        producer = new Producer<K, V>(config);
+        this.topic = (String) stormConf.get(TOPIC);
+        this.collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple input) {
+        K key = null;
+        if (input.contains(BOLT_KEY)) {
+            key = (K) input.getValueByField(BOLT_KEY);
+        }
+        V message = (V) input.getValueByField(BOLT_MESSAGE);
+        try {
+            producer.send(new KeyedMessage<K, V>(topic, key, message));
+        } catch (Exception ex) {
+            LOG.error("Could not send message with key '" + key + "' and value '" + message + "'", ex);
+        } finally {
+            collector.ack(input);
+        }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+
+    }
+}
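
To make the configuration contract described in the Javadoc concrete, a sketch of wiring this bolt into a topology. The broker address, topic, and component names are placeholders; the producer property keys are the standard Kafka 0.8 producer settings already used by the tests below.

    import java.util.Properties;

    import backtype.storm.Config;
    import backtype.storm.topology.TopologyBuilder;
    import storm.kafka.bolt.KafkaBolt;

    public class KafkaBoltWiring {
        public static Config buildConfig() {
            Config conf = new Config();
            Properties producerProps = new Properties();
            // placeholder broker list; picked up by the bolt in prepare()
            producerProps.put("metadata.broker.list", "localhost:9092");
            producerProps.put("serializer.class", "kafka.serializer.StringEncoder");
            producerProps.put("request.required.acks", "1");
            conf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, producerProps);
            conf.put(KafkaBolt.TOPIC, "output-topic");
            return conf;
        }

        public static void main(String[] args) {
            TopologyBuilder builder = new TopologyBuilder();
            // the upstream component must emit fields named "key" and "message";
            // "upstream-bolt" is a placeholder component id
            builder.setBolt("kafka-out", new KafkaBolt<String, String>(), 1)
                   .shuffleGrouping("upstream-bolt");
        }
    }
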

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/118cec43/src/test/storm/kafka/bolt/KafkaBoltTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/bolt/KafkaBoltTest.java b/src/test/storm/kafka/bolt/KafkaBoltTest.java
new file mode 100644
index 0000000..129b0f6
--- /dev/null
+++ b/src/test/storm/kafka/bolt/KafkaBoltTest.java
@@ -0,0 +1,170 @@
+package storm.kafka.bolt;
+
+import backtype.storm.Config;
+import backtype.storm.task.GeneralTopologyContext;
+import backtype.storm.task.IOutputCollector;
+import backtype.storm.task.OutputCollector;
+import backtype.storm.topology.TopologyBuilder;
+import backtype.storm.tuple.Fields;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.TupleImpl;
+import backtype.storm.tuple.Values;
+import backtype.storm.utils.Utils;
+import kafka.api.OffsetRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.Message;
+import kafka.message.MessageAndOffset;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import storm.kafka.*;
+import storm.kafka.trident.GlobalPartitionInformation;
+
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.verify;
+
+public class KafkaBoltTest {
+
+    private static final String TEST_TOPIC = "test-topic";
+    private KafkaTestBroker broker;
+    private KafkaBolt bolt;
+    private Config config = new Config();
+    private KafkaConfig kafkaConfig;
+    private SimpleConsumer simpleConsumer;
+
+    @Mock
+    private IOutputCollector collector;
+
+    @Before
+    public void initMocks() {
+        MockitoAnnotations.initMocks(this);
+        broker = new KafkaTestBroker();
+        setupKafkaConsumer();
+        config.put(KafkaBolt.TOPIC, TEST_TOPIC);
+        bolt = generateStringSerializerBolt();
+    }
+
+    @After
+    public void shutdown() {
+        broker.shutdown();
+    }
+
+
+    private void setupKafkaConsumer() {
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
+        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
+        BrokerHosts brokerHosts = new StaticHosts(globalPartitionInformation);
+        kafkaConfig = new KafkaConfig(brokerHosts, TEST_TOPIC);
+        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+    }
+
+    @Test
+    public void executeWithKey() throws Exception {
+        String message = "value-123";
+        String key = "key-123";
+        Tuple tuple = generateTestTuple(key, message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(key, message);
+    }
+
+    @Test
+    public void executeWithByteArrayKeyAndMessage() {
+        bolt = generateDefaultSerializerBolt();
+        String keyString = "test-key";
+        String messageString = "test-message";
+        byte[] key = keyString.getBytes();
+        byte[] message = messageString.getBytes();
+        Tuple tuple = generateTestTuple(key, message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(keyString, messageString);
+    }
+
+    private KafkaBolt generateStringSerializerBolt() {
+        KafkaBolt bolt = new KafkaBolt();
+        Properties props = new Properties();
+        props.put("metadata.broker.list", broker.getBrokerConnectionString());
+        props.put("request.required.acks", "1");
+        props.put("serializer.class", "kafka.serializer.StringEncoder");
+        config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
+        bolt.prepare(config, null, new OutputCollector(collector));
+        return bolt;
+    }
+
+    private KafkaBolt generateDefaultSerializerBolt() {
+        KafkaBolt bolt = new KafkaBolt();
+        Properties props = new Properties();
+        props.put("metadata.broker.list", broker.getBrokerConnectionString());
+        props.put("request.required.acks", "1");
+        config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
+        bolt.prepare(config, null, new OutputCollector(collector));
+        return bolt;
+    }
+
+    @Test
+    public void executeWithoutKey() throws Exception {
+        String message = "value-234";
+        Tuple tuple = generateTestTuple(message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(null, message);
+    }
+
+
+    @Test
+    public void executeWithBrokerDown() throws Exception {
+        broker.shutdown();
+        String message = "value-234";
+        Tuple tuple = generateTestTuple(message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+    }
+
+
+    private boolean verifyMessage(String key, String message) {
+        long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
+        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
+        MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
+        Message kafkaMessage = messageAndOffset.message();
+        ByteBuffer messageKeyBuffer = kafkaMessage.key();
+        String keyString = null;
+        String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
+        if (messageKeyBuffer != null) {
+            keyString = new String(Utils.toByteArray(messageKeyBuffer));
+        }
+        assertEquals(key, keyString);
+        assertEquals(message, messageString);
+        return true;
+    }
+
+    private Tuple generateTestTuple(Object key, Object message) {
+        TopologyBuilder builder = new TopologyBuilder();
+        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
+            @Override
+            public Fields getComponentOutputFields(String componentId, String streamId) {
+                return new Fields("key", "message");
+            }
+        };
+        return new TupleImpl(topologyContext, new Values(key, message), 1, "");
+    }
+
+    private Tuple generateTestTuple(Object message) {
+        TopologyBuilder builder = new TopologyBuilder();
+        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
+            @Override
+            public Fields getComponentOutputFields(String componentId, String streamId) {
+                return new Fields("message");
+            }
+        };
+        return new TupleImpl(topologyContext, new Values(message), 1, "");
+    }
+}


[44/50] [abbrv] git commit: update to kafka 0.8.1

Posted by pt...@apache.org.
update to kafka 0.8.1


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/a72aafae
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/a72aafae
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/a72aafae

Branch: refs/heads/master
Commit: a72aafae503c8314b26330e0d4f440fb4632a483
Parents: 7d1bf2a
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Wed Apr 9 10:33:16 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Wed Apr 9 10:33:16 2014 -0400

----------------------------------------------------------------------
 external/storm-kafka/pom.xml                                  | 2 +-
 .../storm-kafka/src/test/storm/kafka/KafkaTestBroker.java     | 7 ++++++-
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/a72aafae/external/storm-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-kafka/pom.xml b/external/storm-kafka/pom.xml
index 15743b6..9385678 100644
--- a/external/storm-kafka/pom.xml
+++ b/external/storm-kafka/pom.xml
@@ -106,7 +106,7 @@
         <dependency>
             <groupId>org.apache.kafka</groupId>
             <artifactId>${kafkaArtifact}</artifactId>
-            <version>0.8.0</version>
+            <version>0.8.1</version>
             <exclusions>
                 <exclusion>
                     <groupId>org.apache.zookeeper</groupId>

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/a72aafae/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java b/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
index d2a44a4..db4663d 100644
--- a/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
+++ b/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
@@ -6,6 +6,7 @@ import com.netflix.curator.retry.ExponentialBackoffRetry;
 import com.netflix.curator.test.TestingServer;
 import kafka.server.KafkaServerStartable;
 
+import java.io.IOException;
 import java.util.Properties;
 
 /**
@@ -48,6 +49,10 @@ public class KafkaTestBroker {
 
     public void shutdown() {
         kafka.shutdown();
-        server.stop();
+        try {
+            server.stop();
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
     }
 }


[10/50] [abbrv] git commit: Added error handling for fetch request

Posted by pt...@apache.org.
Added error handling for fetch request

* allow retry of fetch if offset was invalid
* updated changelog


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/95c60dbb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/95c60dbb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/95c60dbb

Branch: refs/heads/master
Commit: 95c60dbbbc80165969bae6cbbd1926207720e59c
Parents: 6b29f8f
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Jan 11 16:34:01 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Jan 11 16:40:02 2014 +0000

----------------------------------------------------------------------
 CHANGELOG.md                                    |   1 +
 src/jvm/storm/kafka/FailedFetchException.java   |  12 ++
 src/jvm/storm/kafka/KafkaConfig.java            |   5 +-
 src/jvm/storm/kafka/KafkaError.java             |  29 ++++
 src/jvm/storm/kafka/KafkaSpout.java             |  24 ---
 src/jvm/storm/kafka/KafkaUtils.java             | 159 +++++++++++++++++++
 src/jvm/storm/kafka/PartitionManager.java       |  11 +-
 src/jvm/storm/kafka/trident/Coordinator.java    |   1 +
 .../kafka/trident/FailedFetchException.java     |   7 -
 src/jvm/storm/kafka/trident/KafkaUtils.java     | 112 -------------
 .../kafka/trident/TridentKafkaEmitter.java      |  21 +--
 src/test/storm/kafka/KafkaTestBroker.java       |  52 ++++++
 src/test/storm/kafka/KafkaUtilsTest.java        |  90 +++++++++++
 13 files changed, 350 insertions(+), 174 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2fb81fe..ced0ffc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,2 +1,3 @@
 ## 0.3.0
 * updated partition path in zookeeper
+* added error handling for fetch request

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/FailedFetchException.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/FailedFetchException.java b/src/jvm/storm/kafka/FailedFetchException.java
new file mode 100644
index 0000000..0bd1123
--- /dev/null
+++ b/src/jvm/storm/kafka/FailedFetchException.java
@@ -0,0 +1,12 @@
+package storm.kafka;
+
+public class FailedFetchException extends RuntimeException {
+
+    public FailedFetchException(String message) {
+        super(message);
+    }
+
+    public FailedFetchException(Exception e) {
+        super(e);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/KafkaConfig.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaConfig.java b/src/jvm/storm/kafka/KafkaConfig.java
index e241978..dddcead 100644
--- a/src/jvm/storm/kafka/KafkaConfig.java
+++ b/src/jvm/storm/kafka/KafkaConfig.java
@@ -17,6 +17,7 @@ public class KafkaConfig implements Serializable {
     public MultiScheme scheme = new RawMultiScheme();
     public boolean forceFromStart = false;
     public long startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
+    public boolean useStartOffsetTimeIfOffsetOutOfRange = true;
 
     public KafkaConfig(BrokerHosts hosts, String topic) {
         this(hosts, topic, kafka.api.OffsetRequest.DefaultClientId());
@@ -28,8 +29,4 @@ public class KafkaConfig implements Serializable {
         this.clientId = clientId;
     }
 
-    public void forceStartOffsetTime(long millis) {
-        startOffsetTime = millis;
-        forceFromStart = true;
-    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/KafkaError.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaError.java b/src/jvm/storm/kafka/KafkaError.java
new file mode 100644
index 0000000..260ab91
--- /dev/null
+++ b/src/jvm/storm/kafka/KafkaError.java
@@ -0,0 +1,29 @@
+package storm.kafka;
+
+/**
+ * Date: 11/01/2014
+ * Time: 14:21
+ */
+public enum KafkaError {
+    NO_ERROR,
+    OFFSET_OUT_OF_RANGE,
+    INVALID_MESSAGE,
+    UNKNOWN_TOPIC_OR_PARTITION,
+    INVALID_FETCH_SIZE,
+    LEADER_NOT_AVAILABLE,
+    NOT_LEADER_FOR_PARTITION,
+    REQUEST_TIMED_OUT,
+    BROKER_NOT_AVAILABLE,
+    REPLICA_NOT_AVAILABLE,
+    MESSAGE_SIZE_TOO_LARGE,
+    STALE_CONTROLLER_EPOCH,
+    UNKNOWN;
+
+    public static KafkaError getError(short errorCode) {
+        if (errorCode < 0) {
+            return UNKNOWN;
+        } else {
+            return values()[errorCode];
+        }
+    }
+}
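
The enum ordinals line up with the short error codes returned by Kafka 0.8's ErrorMapping (0 = no error, 1 = offset out of range, and so on), so decoding is a table lookup. A minimal sketch of a helper mirroring how fetchMessages (added below in this commit) consumes it; the helper class itself is hypothetical:

    package storm.kafka;

    import kafka.javaapi.FetchResponse;

    // illustrative helper: decode the error for one topic/partition of a fetch response
    public final class KafkaErrorCheck {
        public static KafkaError errorFor(FetchResponse response, String topic, int partition) {
            return KafkaError.getError(response.errorCode(topic, partition));
        }
    }
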

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/KafkaSpout.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaSpout.java b/src/jvm/storm/kafka/KafkaSpout.java
index cf407ad..d097510 100644
--- a/src/jvm/storm/kafka/KafkaSpout.java
+++ b/src/jvm/storm/kafka/KafkaSpout.java
@@ -10,7 +10,6 @@ import kafka.message.Message;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import storm.kafka.PartitionManager.KafkaMessageId;
-import storm.kafka.trident.KafkaUtils;
 
 import java.util.*;
 
@@ -171,27 +170,4 @@ public class KafkaSpout extends BaseRichSpout {
         }
     }
 
-    public static void main(String[] args) {
-//        TopologyBuilder builder = new TopologyBuilder();
-//        List<String> hosts = new ArrayList<String>();
-//        hosts.add("localhost");
-//        SpoutConfig spoutConf = SpoutConfig.fromHostStrings(hosts, 8, "clicks", "/kafkastorm", "id");
-//        spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
-//        spoutConf.forceStartOffsetTime(-2);
-//
-// //       spoutConf.zkServers = new ArrayList<String>() {{
-// //          add("localhost");
-// //       }};
-// //       spoutConf.zkPort = 2181;
-//
-//        builder.setSpout("spout", new KafkaSpout(spoutConf), 3);
-//
-//        Config conf = new Config();
-//        //conf.setDebug(true);
-//
-//        LocalCluster cluster = new LocalCluster();
-//        cluster.submitTopology("kafka-test", conf, builder.createTopology());
-//
-//        Utils.sleep(600000);
-    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaUtils.java b/src/jvm/storm/kafka/KafkaUtils.java
new file mode 100644
index 0000000..5094f14
--- /dev/null
+++ b/src/jvm/storm/kafka/KafkaUtils.java
@@ -0,0 +1,159 @@
+package storm.kafka;
+
+import backtype.storm.metric.api.IMetric;
+import kafka.api.FetchRequest;
+import kafka.api.FetchRequestBuilder;
+import kafka.api.PartitionOffsetRequestInfo;
+import kafka.common.TopicAndPartition;
+import kafka.javaapi.FetchResponse;
+import kafka.javaapi.OffsetRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import storm.kafka.trident.IBrokerReader;
+import storm.kafka.trident.StaticBrokerReader;
+import storm.kafka.trident.ZkBrokerReader;
+
+import java.net.ConnectException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+
+public class KafkaUtils {
+
+    public static final Logger LOG = LoggerFactory.getLogger(KafkaUtils.class);
+    private static final int NO_OFFSET = -5;
+
+
+    public static IBrokerReader makeBrokerReader(Map stormConf, KafkaConfig conf) {
+        if (conf.hosts instanceof StaticHosts) {
+            return new StaticBrokerReader(((StaticHosts) conf.hosts).getPartitionInformation());
+        } else {
+            return new ZkBrokerReader(stormConf, conf.topic, (ZkHosts) conf.hosts);
+        }
+    }
+
+    public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
+        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
+        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
+        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
+        OffsetRequest request = new OffsetRequest(
+                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
+
+        long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
+        if (offsets.length > 0) {
+            return offsets[0];
+        } else {
+            return NO_OFFSET;
+        }
+    }
+
+    public static class KafkaOffsetMetric implements IMetric {
+        Map<Partition, Long> _partitionToOffset = new HashMap<Partition, Long>();
+        Set<Partition> _partitions;
+        String _topic;
+        DynamicPartitionConnections _connections;
+
+        public KafkaOffsetMetric(String topic, DynamicPartitionConnections connections) {
+            _topic = topic;
+            _connections = connections;
+        }
+
+        public void setLatestEmittedOffset(Partition partition, long offset) {
+            _partitionToOffset.put(partition, offset);
+        }
+
+        @Override
+        public Object getValueAndReset() {
+            try {
+                long totalSpoutLag = 0;
+                long totalLatestTimeOffset = 0;
+                long totalLatestEmittedOffset = 0;
+                HashMap ret = new HashMap();
+                if (_partitions != null && _partitions.size() == _partitionToOffset.size()) {
+                    for (Map.Entry<Partition, Long> e : _partitionToOffset.entrySet()) {
+                        Partition partition = e.getKey();
+                        SimpleConsumer consumer = _connections.getConnection(partition);
+                        if (consumer == null) {
+                            LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
+                            return null;
+                        }
+                        long latestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
+                        if (latestTimeOffset == 0) {
+                            LOG.warn("No data found in Kafka Partition " + partition.getId());
+                            return null;
+                        }
+                        long latestEmittedOffset = e.getValue();
+                        long spoutLag = latestTimeOffset - latestEmittedOffset;
+                        ret.put(partition.getId() + "/" + "spoutLag", spoutLag);
+                        ret.put(partition.getId() + "/" + "latestTime", latestTimeOffset);
+                        ret.put(partition.getId() + "/" + "latestEmittedOffset", latestEmittedOffset);
+                        totalSpoutLag += spoutLag;
+                        totalLatestTimeOffset += latestTimeOffset;
+                        totalLatestEmittedOffset += latestEmittedOffset;
+                    }
+                    ret.put("totalSpoutLag", totalSpoutLag);
+                    ret.put("totalLatestTime", totalLatestTimeOffset);
+                    ret.put("totalLatestEmittedOffset", totalLatestEmittedOffset);
+                    return ret;
+                } else {
+                    LOG.info("Metrics Tick: Not enough data to calculate spout lag.");
+                }
+            } catch (Throwable t) {
+                LOG.warn("Metrics Tick: Exception when computing kafkaOffset metric.", t);
+            }
+            return null;
+        }
+
+        public void refreshPartitions(Set<Partition> partitions) {
+            _partitions = partitions;
+            Iterator<Partition> it = _partitionToOffset.keySet().iterator();
+            while (it.hasNext()) {
+                if (!partitions.contains(it.next())) {
+                    it.remove();
+                }
+            }
+        }
+    }
+
+    public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
+        ByteBufferMessageSet msgs = null;
+        String topic = config.topic;
+        int partitionId = partition.partition;
+        for (int errors = 0; errors < 2 && msgs == null; errors++) {
+            FetchRequestBuilder builder = new FetchRequestBuilder();
+            FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
+                    clientId(config.clientId).build();
+            FetchResponse fetchResponse;
+            try {
+                fetchResponse = consumer.fetch(fetchRequest);
+            } catch (Exception e) {
+                if (e instanceof ConnectException) {
+                    throw new FailedFetchException(e);
+                } else {
+                    throw new RuntimeException(e);
+                }
+            }
+            if (fetchResponse.hasError()) {
+                KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
+                if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
+                    long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
+                    LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
+                            "retrying with default start offset time from configuration. " +
+                            "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
+                    offset = startOffset;
+                } else {
+                    String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
+                    LOG.error(message);
+                    throw new FailedFetchException(message);
+                }
+            } else {
+                msgs = fetchResponse.messageSet(topic, partitionId);
+            }
+        }
+        return msgs;
+    }
+}
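
To see the retry in context: fetchMessages makes at most two attempts, and after an OFFSET_OUT_OF_RANGE error it re-resolves the offset from startOffsetTime once, provided the fallback is enabled. A hedged sketch of the configuration knobs involved, using only types from this commit; the broker address and topic are placeholders:

    import storm.kafka.*;
    import storm.kafka.trident.GlobalPartitionInformation;

    public class FetchRetryConfigExample {
        public static KafkaConfig build() {
            GlobalPartitionInformation partitions = new GlobalPartitionInformation();
            // placeholder broker; a single partition 0 on localhost
            partitions.addPartition(0, Broker.fromString("localhost:9092"));
            KafkaConfig config = new KafkaConfig(new StaticHosts(partitions), "testTopic");
            // fall back to startOffsetTime when a requested offset is out of range...
            config.useStartOffsetTimeIfOffsetOutOfRange = true;
            // ...and resolve that fallback to the earliest offset Kafka still retains
            config.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
            return config;
        }
    }
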

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/PartitionManager.java b/src/jvm/storm/kafka/PartitionManager.java
index e3e31db..0861c25 100644
--- a/src/jvm/storm/kafka/PartitionManager.java
+++ b/src/jvm/storm/kafka/PartitionManager.java
@@ -8,7 +8,6 @@ import backtype.storm.metric.api.ReducedMetric;
 import backtype.storm.spout.SpoutOutputCollector;
 import backtype.storm.utils.Utils;
 import com.google.common.collect.ImmutableMap;
-import kafka.api.FetchRequestBuilder;
 import kafka.api.OffsetRequest;
 import kafka.javaapi.consumer.SimpleConsumer;
 import kafka.javaapi.message.ByteBufferMessageSet;
@@ -17,7 +16,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import storm.kafka.KafkaSpout.EmitState;
 import storm.kafka.KafkaSpout.MessageAndRealOffset;
-import storm.kafka.trident.KafkaUtils;
 import storm.kafka.trident.MaxMetric;
 
 import java.util.*;
@@ -132,15 +130,8 @@ public class PartitionManager {
     }
 
     private void fill() {
-        //LOG.info("Fetching from Kafka: " + _consumer.host() + ":" + _partition.partition + " from offset " + _emittedToOffset);
         long start = System.nanoTime();
-        ByteBufferMessageSet msgs = _consumer.fetch(
-                new FetchRequestBuilder().addFetch(
-                        _spoutConfig.topic,
-                        _partition.partition,
-                        _emittedToOffset,
-                        _spoutConfig.fetchSizeBytes).build()).messageSet(_spoutConfig.topic,
-                _partition.partition);
+        ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, _emittedToOffset);
         long end = System.nanoTime();
         long millis = (end - start) / 1000000;
         _fetchAPILatencyMax.update(millis);

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/trident/Coordinator.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/Coordinator.java b/src/jvm/storm/kafka/trident/Coordinator.java
index d97feed..f67acaa 100644
--- a/src/jvm/storm/kafka/trident/Coordinator.java
+++ b/src/jvm/storm/kafka/trident/Coordinator.java
@@ -1,5 +1,6 @@
 package storm.kafka.trident;
 
+import storm.kafka.KafkaUtils;
 import storm.trident.spout.IOpaquePartitionedTridentSpout;
 import storm.trident.spout.IPartitionedTridentSpout;
 

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/trident/FailedFetchException.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/FailedFetchException.java b/src/jvm/storm/kafka/trident/FailedFetchException.java
deleted file mode 100644
index c4fcc61..0000000
--- a/src/jvm/storm/kafka/trident/FailedFetchException.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package storm.kafka.trident;
-
-public class FailedFetchException extends RuntimeException {
-    public FailedFetchException(Exception e) {
-        super(e);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/trident/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/KafkaUtils.java b/src/jvm/storm/kafka/trident/KafkaUtils.java
deleted file mode 100644
index e4ba3b3..0000000
--- a/src/jvm/storm/kafka/trident/KafkaUtils.java
+++ /dev/null
@@ -1,112 +0,0 @@
-package storm.kafka.trident;
-
-import backtype.storm.metric.api.IMetric;
-import kafka.api.PartitionOffsetRequestInfo;
-import kafka.common.TopicAndPartition;
-import kafka.javaapi.OffsetRequest;
-import kafka.javaapi.consumer.SimpleConsumer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.*;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-public class KafkaUtils {
-    public static final Logger LOG = LoggerFactory.getLogger(KafkaUtils.class);
-    private static final int NO_OFFSET = -5;
-
-
-    public static IBrokerReader makeBrokerReader(Map stormConf, KafkaConfig conf) {
-        if (conf.hosts instanceof StaticHosts) {
-            return new StaticBrokerReader(((StaticHosts) conf.hosts).getPartitionInformation());
-        } else {
-            return new ZkBrokerReader(stormConf, conf.topic, (ZkHosts) conf.hosts);
-        }
-    }
-
-    public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
-        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
-        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
-        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
-        OffsetRequest request = new OffsetRequest(
-                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
-
-        long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
-        if (offsets.length > 0) {
-            return offsets[0];
-        } else {
-            return NO_OFFSET;
-        }
-    }
-
-    public static class KafkaOffsetMetric implements IMetric {
-        Map<Partition, Long> _partitionToOffset = new HashMap<Partition, Long>();
-        Set<Partition> _partitions;
-        String _topic;
-        DynamicPartitionConnections _connections;
-
-        public KafkaOffsetMetric(String topic, DynamicPartitionConnections connections) {
-            _topic = topic;
-            _connections = connections;
-        }
-
-        public void setLatestEmittedOffset(Partition partition, long offset) {
-            _partitionToOffset.put(partition, offset);
-        }
-
-        @Override
-        public Object getValueAndReset() {
-            try {
-                long totalSpoutLag = 0;
-                long totalLatestTimeOffset = 0;
-                long totalLatestEmittedOffset = 0;
-                HashMap ret = new HashMap();
-                if (_partitions != null && _partitions.size() == _partitionToOffset.size()) {
-                    for (Map.Entry<Partition, Long> e : _partitionToOffset.entrySet()) {
-                        Partition partition = e.getKey();
-                        SimpleConsumer consumer = _connections.getConnection(partition);
-                        if (consumer == null) {
-                            LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
-                            return null;
-                        }
-                        long latestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
-                        if (latestTimeOffset == 0) {
-                            LOG.warn("No data found in Kafka Partition " + partition.getId());
-                            return null;
-                        }
-                        long latestEmittedOffset = e.getValue();
-                        long spoutLag = latestTimeOffset - latestEmittedOffset;
-                        ret.put(partition.getId() + "/" + "spoutLag", spoutLag);
-                        ret.put(partition.getId() + "/" + "latestTime", latestTimeOffset);
-                        ret.put(partition.getId() + "/" + "latestEmittedOffset", latestEmittedOffset);
-                        totalSpoutLag += spoutLag;
-                        totalLatestTimeOffset += latestTimeOffset;
-                        totalLatestEmittedOffset += latestEmittedOffset;
-                    }
-                    ret.put("totalSpoutLag", totalSpoutLag);
-                    ret.put("totalLatestTime", totalLatestTimeOffset);
-                    ret.put("totalLatestEmittedOffset", totalLatestEmittedOffset);
-                    return ret;
-                } else {
-                    LOG.info("Metrics Tick: Not enough data to calculate spout lag.");
-                }
-            } catch (Throwable t) {
-                LOG.warn("Metrics Tick: Exception when computing kafkaOffset metric.", t);
-            }
-            return null;
-        }
-
-        public void refreshPartitions(Set<Partition> partitions) {
-            _partitions = partitions;
-            Iterator<Partition> it = _partitionToOffset.keySet().iterator();
-            while (it.hasNext()) {
-                if (!partitions.contains(it.next())) {
-                    it.remove();
-                }
-            }
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
index ab4ec63..eceba47 100644
--- a/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
+++ b/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
@@ -7,8 +7,6 @@ import backtype.storm.metric.api.ReducedMetric;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.utils.Utils;
 import com.google.common.collect.ImmutableMap;
-import kafka.api.FetchRequest;
-import kafka.api.FetchRequestBuilder;
 import kafka.javaapi.consumer.SimpleConsumer;
 import kafka.javaapi.message.ByteBufferMessageSet;
 import kafka.message.Message;
@@ -16,13 +14,14 @@ import kafka.message.MessageAndOffset;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import storm.kafka.DynamicPartitionConnections;
+import storm.kafka.FailedFetchException;
+import storm.kafka.KafkaUtils;
 import storm.kafka.Partition;
 import storm.trident.operation.TridentCollector;
 import storm.trident.spout.IOpaquePartitionedTridentSpout;
 import storm.trident.spout.IPartitionedTridentSpout;
 import storm.trident.topology.TransactionAttempt;
 
-import java.net.ConnectException;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -103,16 +102,7 @@ public class TridentKafkaEmitter {
             }
             offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, startTime);
         }
-        ByteBufferMessageSet msgs;
-        try {
-            msgs = fetchMessages(consumer, partition, offset);
-        } catch (Exception e) {
-            if (e instanceof ConnectException) {
-                throw new FailedFetchException(e);
-            } else {
-                throw new RuntimeException(e);
-            }
-        }
+        ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
         long endoffset = offset;
         for (MessageAndOffset msg : msgs) {
             emit(collector, msg.message());
@@ -130,11 +120,8 @@ public class TridentKafkaEmitter {
     }
 
     private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
-        ByteBufferMessageSet msgs;
         long start = System.nanoTime();
-        FetchRequestBuilder builder = new FetchRequestBuilder();
-        FetchRequest fetchRequest = builder.addFetch(_config.topic, partition.partition, offset, _config.fetchSizeBytes).clientId(_config.clientId).build();
-        msgs = consumer.fetch(fetchRequest).messageSet(_config.topic, partition.partition);
+        ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
         long end = System.nanoTime();
         long millis = (end - start) / 1000000;
         _kafkaMeanFetchLatencyMetric.update(millis);

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/test/storm/kafka/KafkaTestBroker.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaTestBroker.java b/src/test/storm/kafka/KafkaTestBroker.java
new file mode 100644
index 0000000..7019c86
--- /dev/null
+++ b/src/test/storm/kafka/KafkaTestBroker.java
@@ -0,0 +1,52 @@
+package storm.kafka;
+
+import com.netflix.curator.framework.CuratorFramework;
+import com.netflix.curator.framework.CuratorFrameworkFactory;
+import com.netflix.curator.retry.ExponentialBackoffRetry;
+import com.netflix.curator.test.TestingServer;
+import kafka.server.KafkaServerStartable;
+
+import java.util.Properties;
+
+/**
+ * Date: 11/01/2014
+ * Time: 13:15
+ */
+public class KafkaTestBroker {
+
+    private final int port = 49123;
+    private KafkaServerStartable kafka;
+    private TestingServer server;
+    private String zookeeperConnectionString;
+
+    public KafkaTestBroker() {
+        try {
+            server = new TestingServer();
+            zookeeperConnectionString = server.getConnectString();
+            ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
+            CuratorFramework zookeeper = CuratorFrameworkFactory.newClient(zookeeperConnectionString, retryPolicy);
+            zookeeper.start();
+            Properties p = new Properties();
+            p.setProperty("zookeeper.connect", zookeeperConnectionString);
+            p.setProperty("broker.id", "0");
+            p.setProperty("port", "" + port);
+            kafka.server.KafkaConfig config = new kafka.server.KafkaConfig(p);
+            kafka = new KafkaServerStartable(config);
+            kafka.startup();
+        } catch (Exception ex) {
+            throw new RuntimeException("Could not start test broker", ex);
+        }
+    }
+
+    public String getBrokerConnectionString() {
+        return "localhost:" + port;
+    }
+
+    public int getPort() {
+        return port;
+    }
+
+    public void shutdown() {
+        kafka.shutdown();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/95c60dbb/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaUtilsTest.java b/src/test/storm/kafka/KafkaUtilsTest.java
new file mode 100644
index 0000000..506789c
--- /dev/null
+++ b/src/test/storm/kafka/KafkaUtilsTest.java
@@ -0,0 +1,90 @@
+package storm.kafka;
+
+import backtype.storm.utils.Utils;
+import kafka.api.OffsetRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.javaapi.producer.Producer;
+import kafka.producer.KeyedMessage;
+import kafka.producer.ProducerConfig;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import storm.kafka.trident.GlobalPartitionInformation;
+
+import java.util.Properties;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+public class KafkaUtilsTest {
+
+    private KafkaTestBroker broker;
+    private SimpleConsumer simpleConsumer;
+    private KafkaConfig config;
+
+    @Before
+    public void setup() {
+        broker = new KafkaTestBroker();
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
+        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
+        BrokerHosts brokerHosts = new StaticHosts(globalPartitionInformation);
+        config = new KafkaConfig(brokerHosts, "testTopic");
+        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+    }
+
+    @After
+    public void shutdown() {
+        broker.shutdown();
+    }
+
+
+    @Test(expected = FailedFetchException.class)
+    public void topicDoesNotExist() throws Exception {
+        KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), 0);
+    }
+
+    @Test(expected = FailedFetchException.class)
+    public void brokerIsDown() throws Exception {
+        int port = broker.getPort();
+        broker.shutdown();
+        SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", port, 100, 1024, "testClient");
+        KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), OffsetRequest.LatestTime());
+    }
+
+    @Test
+    public void fetchMessage() throws Exception {
+        long lastOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
+        sendMessageAndAssertValueForOffset(lastOffset);
+    }
+
+    @Test(expected = FailedFetchException.class)
+    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingDisabled() throws Exception {
+        config.useStartOffsetTimeIfOffsetOutOfRange = false;
+        sendMessageAndAssertValueForOffset(-99);
+    }
+
+    @Test
+    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingEnabled() throws Exception {
+        sendMessageAndAssertValueForOffset(-99);
+    }
+
+    private String createTopicAndSendMessage() {
+        Properties p = new Properties();
+        p.setProperty("metadata.broker.list", "localhost:49123");
+        p.setProperty("serializer.class", "kafka.serializer.StringEncoder");
+        ProducerConfig producerConfig = new ProducerConfig(p);
+        Producer<String, String> producer = new Producer<String, String>(producerConfig);
+        String value = "value";
+        producer.send(new KeyedMessage<String, String>(config.topic, value));
+        return value;
+    }
+
+    private void sendMessageAndAssertValueForOffset(long offset) {
+        String value = createTopicAndSendMessage();
+        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
+        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
+        assertThat(message, is(equalTo(value)));
+    }
+}

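For readers following the tests above: the failure modes they pin down
suggest a guard like the following at the fetch call site. This is a
minimal sketch, not part of the patch; the fetchOrFail name and the
consumer/partition/offset arguments are illustrative.

    // Sketch: guard a fetch given the semantics exercised by the tests.
    private ByteBufferMessageSet fetchOrFail(KafkaConfig config, SimpleConsumer consumer,
                                             Partition partition, long offset) {
        try {
            return KafkaUtils.fetchMessages(config, consumer, partition, offset);
        } catch (FailedFetchException e) {
            // A missing topic or unreachable broker ends up here; with
            // config.useStartOffsetTimeIfOffsetOutOfRange == false, an
            // out-of-range offset is also surfaced instead of being reset
            // to the configured start offset time.
            consumer.close();
            throw e;
        }
    }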

[45/50] [abbrv] git commit: add topic to committed path

Posted by pt...@apache.org.
add topic to committed path


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/859a2e81
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/859a2e81
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/859a2e81

Branch: refs/heads/master
Commit: 859a2e81be8f61720fc2965acef5f73f1449a7bb
Parents: a72aafa
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Wed Apr 9 10:57:37 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Wed Apr 9 10:57:37 2014 -0400

----------------------------------------------------------------------
 .../src/jvm/storm/kafka/DynamicBrokersReader.java | 18 +++++++++++-------
 .../src/jvm/storm/kafka/KafkaUtils.java           |  4 ++--
 .../src/jvm/storm/kafka/PartitionManager.java     |  2 +-
 3 files changed, 14 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/859a2e81/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java b/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
index cd751fe..b9085bc 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
@@ -26,13 +26,17 @@ public class DynamicBrokersReader {
     public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
         _zkPath = zkPath;
         _topic = topic;
-        _curator = CuratorFrameworkFactory.newClient(
-                zkStr,
-                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
-                15000,
-                new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
-                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
-        _curator.start();
+        try {
+            _curator = CuratorFrameworkFactory.newClient(
+                    zkStr,
+                    Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
+                    15000,
+                    new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
+                            Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
+            _curator.start();
+        } catch (Exception ex) {
+            LOG.error("can't connect to zookeeper");
+        }
     }
 
     /**

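One caveat with the catch-and-log approach in this hunk: a failed
connection leaves _curator null, so the first use of the client fails
with a NullPointerException far from the root cause. A stricter variant
(a sketch, not what this patch does) would attach the cause and fail fast:

    } catch (Exception ex) {
        LOG.error("Can't connect to zookeeper at " + zkStr, ex);
        throw new RuntimeException("Zookeeper connection failed: " + zkStr, ex);
    }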
http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/859a2e81/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
index 0e7f601..313e08e 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
@@ -41,7 +41,7 @@ public class KafkaUtils {
 
     public static long getOffset(SimpleConsumer consumer, String topic, int partition, KafkaConfig config) {
         long startOffsetTime = kafka.api.OffsetRequest.LatestTime();
-        if (config.forceFromStart) {
+        if ( config.forceFromStart ) {
             startOffsetTime = config.startOffsetTime;
         }
         return getOffset(consumer, topic, partition, startOffsetTime);
@@ -93,7 +93,7 @@ public class KafkaUtils {
                             LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
                             return null;
                         }
-                        long earliestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.EarliestTime());
+                        long earliestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.EarliestTime()); 
                         long latestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
                         if (earliestTimeOffset == 0 || latestTimeOffset == 0) {
                             LOG.warn("No data found in Kafka Partition " + partition.getId());

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/859a2e81/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java b/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
index 03075bb..68151eb 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
@@ -194,7 +194,7 @@ public class PartitionManager {
     }
 
     private String committedPath() {
-        return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" + _partition.getId();
+        return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" +  _spoutConfig.topic + "/" + _partition.getId();
     }
 
     public long queryPartitionOffsetLatestTime() {

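The effect on the ZooKeeper layout: committed offsets move from a
spout-scoped node to a topic-scoped one, so two spouts sharing an id but
reading different topics no longer collide. With illustrative values
(zkRoot "/kafka-storm", id "my-spout", topic "testTopic", and whatever
Partition.getId() returns for partition 0):

    before: /kafka-storm/my-spout/<partition id>
    after:  /kafka-storm/my-spout/testTopic/<partition id>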

[29/50] [abbrv] git commit: Merge pull request #25 from dschiavu/new-metric-earliest-offset

Posted by pt...@apache.org.
Merge pull request #25 from dschiavu/new-metric-earliest-offset

Add a new per-partition and total metric, "earliestTimeOffset"

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/09ae9730
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/09ae9730
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/09ae9730

Branch: refs/heads/master
Commit: 09ae97303d3faa0b4b837a3bbe18b996854d2733
Parents: 118cec4 c695c1b
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Thu Feb 27 22:26:35 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Thu Feb 27 22:26:35 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaUtils.java | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
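To actually see per-partition gauges such as earliestTimeOffset, a
topology needs a metrics consumer registered. A minimal sketch using
storm-core's stock LoggingMetricsConsumer (the parallelism hint of 1 is
illustrative):

    import backtype.storm.Config;
    import backtype.storm.metric.LoggingMetricsConsumer;

    Config conf = new Config();
    // Routes registered IMetric values (including the spout's offset
    // gauges) to the worker logs.
    conf.registerMetricsConsumer(LoggingMetricsConsumer.class, 1);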



[47/50] [abbrv] git commit: apply Apache license headers

Posted by pt...@apache.org.
apply Apache license headers


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/72dbbee7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/72dbbee7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/72dbbee7

Branch: refs/heads/master
Commit: 72dbbee75fe2524ac4ffc2da6f5166483af7ad32
Parents: 3270dd2
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Wed Apr 9 11:40:53 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Wed Apr 9 11:40:53 2014 -0400

----------------------------------------------------------------------
 external/storm-kafka/CHANGELOG.md               |   5 +-
 external/storm-kafka/LICENSE                    | 191 -------------------
 .../storm-kafka/src/jvm/storm/kafka/Broker.java |  17 ++
 .../src/jvm/storm/kafka/BrokerHosts.java        |  22 ++-
 .../jvm/storm/kafka/DynamicBrokersReader.java   |  17 ++
 .../kafka/DynamicPartitionConnections.java      |  17 ++
 .../jvm/storm/kafka/FailedFetchException.java   |  17 ++
 .../src/jvm/storm/kafka/KafkaConfig.java        |  17 ++
 .../src/jvm/storm/kafka/KafkaError.java         |  21 +-
 .../src/jvm/storm/kafka/KafkaSpout.java         |  17 ++
 .../src/jvm/storm/kafka/KafkaUtils.java         |  17 ++
 .../src/jvm/storm/kafka/KeyValueScheme.java     |  17 ++
 .../kafka/KeyValueSchemeAsMultiScheme.java      |  17 ++
 .../src/jvm/storm/kafka/Partition.java          |  17 ++
 .../jvm/storm/kafka/PartitionCoordinator.java   |  17 ++
 .../src/jvm/storm/kafka/PartitionManager.java   |  17 ++
 .../src/jvm/storm/kafka/SpoutConfig.java        |  17 ++
 .../src/jvm/storm/kafka/StaticCoordinator.java  |  17 ++
 .../src/jvm/storm/kafka/StaticHosts.java        |  17 ++
 .../storm/kafka/StaticPartitionConnections.java |  17 ++
 .../jvm/storm/kafka/StringKeyValueScheme.java   |  17 ++
 .../src/jvm/storm/kafka/StringScheme.java       |  17 ++
 .../src/jvm/storm/kafka/ZkCoordinator.java      |  17 ++
 .../src/jvm/storm/kafka/ZkHosts.java            |  22 ++-
 .../src/jvm/storm/kafka/ZkState.java            |  17 ++
 .../src/jvm/storm/kafka/bolt/KafkaBolt.java     |  17 ++
 .../jvm/storm/kafka/trident/Coordinator.java    |  21 +-
 .../storm/kafka/trident/DefaultCoordinator.java |  17 ++
 .../trident/GlobalPartitionInformation.java     |  22 ++-
 .../storm/kafka/trident/IBatchCoordinator.java  |  17 ++
 .../jvm/storm/kafka/trident/IBrokerReader.java  |  17 ++
 .../src/jvm/storm/kafka/trident/MaxMetric.java  |  17 ++
 .../kafka/trident/OpaqueTridentKafkaSpout.java  |  17 ++
 .../storm/kafka/trident/StaticBrokerReader.java |  17 ++
 .../trident/TransactionalTridentKafkaSpout.java |  17 ++
 .../storm/kafka/trident/TridentKafkaConfig.java |  17 ++
 .../kafka/trident/TridentKafkaEmitter.java      |  21 +-
 .../jvm/storm/kafka/trident/ZkBrokerReader.java |  17 ++
 38 files changed, 619 insertions(+), 216 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/external/storm-kafka/CHANGELOG.md b/external/storm-kafka/CHANGELOG.md
index f876421..33a49ee 100644
--- a/external/storm-kafka/CHANGELOG.md
+++ b/external/storm-kafka/CHANGELOG.md
@@ -1,9 +1,12 @@
-## 0.5.0
+## 0.9.2-incubating (0.5.0)
+* incorporated as an Apache Storm external module
 * fixed partition assignment for KafkaSpout
 * upgraded to storm 0.9.1
+
 ## 0.4.0
 * added support for reading kafka message keys
 * configurable metrics emit interval
+
 ## 0.3.0
 * updated partition path in zookeeper
 * added error handling for fetch request

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/LICENSE
----------------------------------------------------------------------
diff --git a/external/storm-kafka/LICENSE b/external/storm-kafka/LICENSE
deleted file mode 100644
index 37ec93a..0000000
--- a/external/storm-kafka/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/Broker.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/Broker.java b/external/storm-kafka/src/jvm/storm/kafka/Broker.java
index 2451eee..bfa3e0b 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/Broker.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/Broker.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java b/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
index 12ef7b1..fcdf0b6 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
@@ -1,11 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import java.io.Serializable;
 
-/**
- * Date: 11/05/2013
- * Time: 14:40
- */
+
 public interface BrokerHosts extends Serializable {
 
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java b/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
index b9085bc..5197862 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.Config;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java b/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
index 8d0115b..f550858 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import kafka.javaapi.consumer.SimpleConsumer;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java b/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
index 0bd1123..011240e 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 public class FailedFetchException extends RuntimeException {

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
index 8ef2a88..875629b 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.spout.MultiScheme;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
index a67335c..634af85 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
@@ -1,9 +1,22 @@
-package storm.kafka;
-
 /**
- * Date: 11/01/2014
- * Time: 14:21
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
+package storm.kafka;
+
 public enum KafkaError {
     NO_ERROR,
     OFFSET_OUT_OF_RANGE,

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
index 79e33fe..102dce1 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.Config;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
index 313e08e..3d355e5 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.metric.api.IMetric;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java b/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
index df31cb8..f42f7c8 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.spout.Scheme;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java b/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
index 2412a1c..a570e7d 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.spout.SchemeAsMultiScheme;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/Partition.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/Partition.java b/external/storm-kafka/src/jvm/storm/kafka/Partition.java
index 96a3ad7..fe6ab4f 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/Partition.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/Partition.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import com.google.common.base.Objects;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
index d28248d..60cc237 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java b/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
index 68151eb..9504427 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.Config;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java b/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
index 05551ec..1b66026 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
index 040060c..456a2a1 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java b/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
index 9ed7193..bee7118 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import storm.kafka.trident.GlobalPartitionInformation;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java b/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
index a9b9db1..1353b6c 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import kafka.javaapi.consumer.SimpleConsumer;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java b/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
index a6adddb..41cacb6 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.tuple.Values;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java b/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
index a809448..102ea69 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.spout.Scheme;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
index ec35aed..e414d06 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java b/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
index f2e0fc2..d9acc66 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
@@ -1,9 +1,23 @@
-package storm.kafka;
-
 /**
- * Date: 11/05/2013
- * Time: 14:38
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
+package storm.kafka;
+
+
 public class ZkHosts implements BrokerHosts {
     private static final String DEFAULT_ZK_PATH = "/brokers";
 

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ZkState.java b/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
index d5416af..52585ef 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka;
 
 import backtype.storm.Config;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java b/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
index 89969d9..b9ea948 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.bolt;
 
 import backtype.storm.task.OutputCollector;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java b/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
index f67acaa..c395f8c 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import storm.kafka.KafkaUtils;
@@ -6,10 +23,6 @@ import storm.trident.spout.IPartitionedTridentSpout;
 
 import java.util.Map;
 
-/**
- * Date: 11/05/2013
- * Time: 19:35
- */
 class Coordinator implements IPartitionedTridentSpout.Coordinator<GlobalPartitionInformation>, IOpaquePartitionedTridentSpout.Coordinator<GlobalPartitionInformation> {
 
     private IBrokerReader reader;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
index 89cd503..04e1396 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 public class DefaultCoordinator implements IBatchCoordinator {

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java b/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
index 6f82f62..ae136e5 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import storm.kafka.Broker;
@@ -8,10 +25,7 @@ import java.util.*;
 
 import com.google.common.base.Objects;
 
-/**
- * Date: 14/05/2013
- * Time: 19:18
- */
+
 public class GlobalPartitionInformation implements Iterable<Partition>, Serializable {
 
     private Map<Integer, Broker> partitionMap;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
index 1b8a8ce..04231f4 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java b/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
index 73c9738..3e018d9 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 public interface IBrokerReader {

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java b/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
index a8f88ba..60d7c7b 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java b/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
index 35b7033..136e7d2 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import backtype.storm.task.TopologyContext;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java b/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
index 98a8f53..d1673f1 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 public class StaticBrokerReader implements IBrokerReader {

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
index b32d301..7a44bdd 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import backtype.storm.task.TopologyContext;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
index 073afa2..3878cc8 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import storm.kafka.BrokerHosts;

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
index 973ce8f..8c57681 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import backtype.storm.Config;
@@ -27,10 +44,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 
-/**
- * Date: 21/05/2013
- * Time: 08:38
- */
 public class TridentKafkaEmitter {
 
     public static final Logger LOG = LoggerFactory.getLogger(TridentKafkaEmitter.class);

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/72dbbee7/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java b/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
index 5e2361d..db11328 100644
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package storm.kafka.trident;
 
 import org.slf4j.Logger;


[15/50] [abbrv] git commit: Merge pull request #13 from EdisonXu/fix1

Posted by pt...@apache.org.
Merge pull request #13 from EdisonXu/fix1

fix issue where partition information kept being refreshed unnecessarily

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/48f68402
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/48f68402
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/48f68402

Branch: refs/heads/master
Commit: 48f6840273e369000421675dfb90141c0e3ac270
Parents: f789091 0a6203a
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Jan 26 13:17:20 2014 -0800
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Jan 26 13:17:20 2014 -0800

----------------------------------------------------------------------
 .../trident/GlobalPartitionInformation.java     | 26 ++++++++++++++++++++
 1 file changed, 26 insertions(+)
----------------------------------------------------------------------
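
Note: the diffstat alone does not show the fix. One plausible reading, given that only GlobalPartitionInformation grows, is that it gains value-based equality so callers can compare a freshly read partition snapshot against the cached one and skip needless refreshes. A hypothetical sketch of that shape (not the committed code):

    // inside GlobalPartitionInformation (hypothetical -- the actual committed bodies may differ)
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        GlobalPartitionInformation other = (GlobalPartitionInformation) o;
        return partitionMap.equals(other.partitionMap);  // equal when every partition has the same leader
    }

    @Override
    public int hashCode() {
        return partitionMap.hashCode();
    }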



[09/50] [abbrv] git commit: Update README.md

Posted by pt...@apache.org.
Update README.md

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/6b29f8f0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/6b29f8f0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/6b29f8f0

Branch: refs/heads/master
Commit: 6b29f8f0bf56b473bf06e2fc7a0f822e5c87d9bc
Parents: 4b6e9bc
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Tue Jan 7 08:00:36 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Tue Jan 7 08:00:36 2014 +0000

----------------------------------------------------------------------
 README.md | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/6b29f8f0/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index b734b5b..13b5b2c 100644
--- a/README.md
+++ b/README.md
@@ -8,3 +8,7 @@ For information on how to use this library in your project see:
 
 [https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus](https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus)
 
+
+##Example Topologies:
+
+[https://github.com/wurstmeister/storm-kafka-0.8-plus-test](https://github.com/wurstmeister/storm-kafka-0.8-plus-test)


[08/50] [abbrv] git commit: added changelog

Posted by pt...@apache.org.
added changelog


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/4b6e9bcf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/4b6e9bcf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/4b6e9bcf

Branch: refs/heads/master
Commit: 4b6e9bcf90f52b3eef4817ff30cb8792b4c3b8e0
Parents: b5de86e
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Jan 5 12:25:22 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Jan 5 12:25:22 2014 +0000

----------------------------------------------------------------------
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/4b6e9bcf/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..2fb81fe
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,2 @@
+## 0.3.0
+* updated partition path in zookeeper


[43/50] [abbrv] git commit: Add storm-kafka as an external module.

Posted by pt...@apache.org.
Add storm-kafka as an external module.


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/7d1bf2a9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/7d1bf2a9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/7d1bf2a9

Branch: refs/heads/master
Commit: 7d1bf2a923f8416197f81b467d40f959b2f4f997
Parents: 38ea0ca 8fafbad
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Wed Apr 9 10:26:08 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Wed Apr 9 10:26:08 2014 -0400

----------------------------------------------------------------------
 external/storm-kafka/CHANGELOG.md               |  10 +
 external/storm-kafka/LICENSE                    | 191 ++++++++++++++
 external/storm-kafka/README.md                  |  22 ++
 external/storm-kafka/pom.xml                    | 138 ++++++++++
 .../storm-kafka/src/jvm/storm/kafka/Broker.java |  63 +++++
 .../src/jvm/storm/kafka/BrokerHosts.java        |  11 +
 .../jvm/storm/kafka/DynamicBrokersReader.java   | 124 +++++++++
 .../kafka/DynamicPartitionConnections.java      |  77 ++++++
 .../jvm/storm/kafka/FailedFetchException.java   |  12 +
 .../src/jvm/storm/kafka/KafkaConfig.java        |  33 +++
 .../src/jvm/storm/kafka/KafkaError.java         |  30 +++
 .../src/jvm/storm/kafka/KafkaSpout.java         | 173 +++++++++++++
 .../src/jvm/storm/kafka/KafkaUtils.java         | 218 ++++++++++++++++
 .../src/jvm/storm/kafka/KeyValueScheme.java     |  11 +
 .../kafka/KeyValueSchemeAsMultiScheme.java      |  19 ++
 .../src/jvm/storm/kafka/Partition.java          |  47 ++++
 .../jvm/storm/kafka/PartitionCoordinator.java   |   9 +
 .../src/jvm/storm/kafka/PartitionManager.java   | 224 ++++++++++++++++
 .../src/jvm/storm/kafka/SpoutConfig.java        |  19 ++
 .../src/jvm/storm/kafka/StaticCoordinator.java  |  31 +++
 .../src/jvm/storm/kafka/StaticHosts.java        |  21 ++
 .../storm/kafka/StaticPartitionConnections.java |  35 +++
 .../jvm/storm/kafka/StringKeyValueScheme.java   |  20 ++
 .../src/jvm/storm/kafka/StringScheme.java       |  29 +++
 .../src/jvm/storm/kafka/ZkCoordinator.java      |  95 +++++++
 .../src/jvm/storm/kafka/ZkHosts.java            |  22 ++
 .../src/jvm/storm/kafka/ZkState.java            |  99 +++++++
 .../src/jvm/storm/kafka/bolt/KafkaBolt.java     |  72 ++++++
 .../jvm/storm/kafka/trident/Coordinator.java    |  37 +++
 .../storm/kafka/trident/DefaultCoordinator.java |  14 +
 .../trident/GlobalPartitionInformation.java     |  85 ++++++
 .../storm/kafka/trident/IBatchCoordinator.java  |   9 +
 .../jvm/storm/kafka/trident/IBrokerReader.java  |   8 +
 .../src/jvm/storm/kafka/trident/MaxMetric.java  |  23 ++
 .../kafka/trident/OpaqueTridentKafkaSpout.java  |  42 +++
 .../storm/kafka/trident/StaticBrokerReader.java |  19 ++
 .../trident/TransactionalTridentKafkaSpout.java |  41 +++
 .../storm/kafka/trident/TridentKafkaConfig.java |  20 ++
 .../kafka/trident/TridentKafkaEmitter.java      | 256 +++++++++++++++++++
 .../jvm/storm/kafka/trident/ZkBrokerReader.java |  45 ++++
 .../storm/kafka/DynamicBrokersReaderTest.java   | 155 +++++++++++
 .../src/test/storm/kafka/KafkaErrorTest.java    |  39 +++
 .../src/test/storm/kafka/KafkaTestBroker.java   |  53 ++++
 .../src/test/storm/kafka/KafkaUtilsTest.java    | 221 ++++++++++++++++
 .../storm/kafka/StringKeyValueSchemeTest.java   |  38 +++
 .../src/test/storm/kafka/TestUtils.java         |  20 ++
 .../src/test/storm/kafka/ZkCoordinatorTest.java | 130 ++++++++++
 .../test/storm/kafka/bolt/KafkaBoltTest.java    | 171 +++++++++++++
 pom.xml                                         |   1 +
 49 files changed, 3282 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/CHANGELOG.md
----------------------------------------------------------------------
diff --cc external/storm-kafka/CHANGELOG.md
index 0000000,0000000..f876421
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/CHANGELOG.md
@@@ -1,0 -1,0 +1,10 @@@
++## 0.5.0
++* fixed partition assignment for KafkaSpout
++* upgraded to storm 0.9.1
++## 0.4.0
++* added support for reading kafka message keys
++* configurable metrics emit interval
++## 0.3.0
++* updated partition path in zookeeper
++* added error handling for fetch request
++

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/LICENSE
----------------------------------------------------------------------
diff --cc external/storm-kafka/LICENSE
index 0000000,0000000..37ec93a
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/LICENSE
@@@ -1,0 -1,0 +1,191 @@@
++Apache License
++Version 2.0, January 2004
++http://www.apache.org/licenses/
++
++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++
++1. Definitions.
++
++"License" shall mean the terms and conditions for use, reproduction, and
++distribution as defined by Sections 1 through 9 of this document.
++
++"Licensor" shall mean the copyright owner or entity authorized by the copyright
++owner that is granting the License.
++
++"Legal Entity" shall mean the union of the acting entity and all other entities
++that control, are controlled by, or are under common control with that entity.
++For the purposes of this definition, "control" means (i) the power, direct or
++indirect, to cause the direction or management of such entity, whether by
++contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
++outstanding shares, or (iii) beneficial ownership of such entity.
++
++"You" (or "Your") shall mean an individual or Legal Entity exercising
++permissions granted by this License.
++
++"Source" form shall mean the preferred form for making modifications, including
++but not limited to software source code, documentation source, and configuration
++files.
++
++"Object" form shall mean any form resulting from mechanical transformation or
++translation of a Source form, including but not limited to compiled object code,
++generated documentation, and conversions to other media types.
++
++"Work" shall mean the work of authorship, whether in Source or Object form, made
++available under the License, as indicated by a copyright notice that is included
++in or attached to the work (an example is provided in the Appendix below).
++
++"Derivative Works" shall mean any work, whether in Source or Object form, that
++is based on (or derived from) the Work and for which the editorial revisions,
++annotations, elaborations, or other modifications represent, as a whole, an
++original work of authorship. For the purposes of this License, Derivative Works
++shall not include works that remain separable from, or merely link (or bind by
++name) to the interfaces of, the Work and Derivative Works thereof.
++
++"Contribution" shall mean any work of authorship, including the original version
++of the Work and any modifications or additions to that Work or Derivative Works
++thereof, that is intentionally submitted to Licensor for inclusion in the Work
++by the copyright owner or by an individual or Legal Entity authorized to submit
++on behalf of the copyright owner. For the purposes of this definition,
++"submitted" means any form of electronic, verbal, or written communication sent
++to the Licensor or its representatives, including but not limited to
++communication on electronic mailing lists, source code control systems, and
++issue tracking systems that are managed by, or on behalf of, the Licensor for
++the purpose of discussing and improving the Work, but excluding communication
++that is conspicuously marked or otherwise designated in writing by the copyright
++owner as "Not a Contribution."
++
++"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
++of whom a Contribution has been received by Licensor and subsequently
++incorporated within the Work.
++
++2. Grant of Copyright License.
++
++Subject to the terms and conditions of this License, each Contributor hereby
++grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
++irrevocable copyright license to reproduce, prepare Derivative Works of,
++publicly display, publicly perform, sublicense, and distribute the Work and such
++Derivative Works in Source or Object form.
++
++3. Grant of Patent License.
++
++Subject to the terms and conditions of this License, each Contributor hereby
++grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
++irrevocable (except as stated in this section) patent license to make, have
++made, use, offer to sell, sell, import, and otherwise transfer the Work, where
++such license applies only to those patent claims licensable by such Contributor
++that are necessarily infringed by their Contribution(s) alone or by combination
++of their Contribution(s) with the Work to which such Contribution(s) was
++submitted. If You institute patent litigation against any entity (including a
++cross-claim or counterclaim in a lawsuit) alleging that the Work or a
++Contribution incorporated within the Work constitutes direct or contributory
++patent infringement, then any patent licenses granted to You under this License
++for that Work shall terminate as of the date such litigation is filed.
++
++4. Redistribution.
++
++You may reproduce and distribute copies of the Work or Derivative Works thereof
++in any medium, with or without modifications, and in Source or Object form,
++provided that You meet the following conditions:
++
++You must give any other recipients of the Work or Derivative Works a copy of
++this License; and
++You must cause any modified files to carry prominent notices stating that You
++changed the files; and
++You must retain, in the Source form of any Derivative Works that You distribute,
++all copyright, patent, trademark, and attribution notices from the Source form
++of the Work, excluding those notices that do not pertain to any part of the
++Derivative Works; and
++If the Work includes a "NOTICE" text file as part of its distribution, then any
++Derivative Works that You distribute must include a readable copy of the
++attribution notices contained within such NOTICE file, excluding those notices
++that do not pertain to any part of the Derivative Works, in at least one of the
++following places: within a NOTICE text file distributed as part of the
++Derivative Works; within the Source form or documentation, if provided along
++with the Derivative Works; or, within a display generated by the Derivative
++Works, if and wherever such third-party notices normally appear. The contents of
++the NOTICE file are for informational purposes only and do not modify the
++License. You may add Your own attribution notices within Derivative Works that
++You distribute, alongside or as an addendum to the NOTICE text from the Work,
++provided that such additional attribution notices cannot be construed as
++modifying the License.
++You may add Your own copyright statement to Your modifications and may provide
++additional or different license terms and conditions for use, reproduction, or
++distribution of Your modifications, or for any such Derivative Works as a whole,
++provided Your use, reproduction, and distribution of the Work otherwise complies
++with the conditions stated in this License.
++
++5. Submission of Contributions.
++
++Unless You explicitly state otherwise, any Contribution intentionally submitted
++for inclusion in the Work by You to the Licensor shall be under the terms and
++conditions of this License, without any additional terms or conditions.
++Notwithstanding the above, nothing herein shall supersede or modify the terms of
++any separate license agreement you may have executed with Licensor regarding
++such Contributions.
++
++6. Trademarks.
++
++This License does not grant permission to use the trade names, trademarks,
++service marks, or product names of the Licensor, except as required for
++reasonable and customary use in describing the origin of the Work and
++reproducing the content of the NOTICE file.
++
++7. Disclaimer of Warranty.
++
++Unless required by applicable law or agreed to in writing, Licensor provides the
++Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
++including, without limitation, any warranties or conditions of TITLE,
++NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
++solely responsible for determining the appropriateness of using or
++redistributing the Work and assume any risks associated with Your exercise of
++permissions under this License.
++
++8. Limitation of Liability.
++
++In no event and under no legal theory, whether in tort (including negligence),
++contract, or otherwise, unless required by applicable law (such as deliberate
++and grossly negligent acts) or agreed to in writing, shall any Contributor be
++liable to You for damages, including any direct, indirect, special, incidental,
++or consequential damages of any character arising as a result of this License or
++out of the use or inability to use the Work (including but not limited to
++damages for loss of goodwill, work stoppage, computer failure or malfunction, or
++any and all other commercial damages or losses), even if such Contributor has
++been advised of the possibility of such damages.
++
++9. Accepting Warranty or Additional Liability.
++
++While redistributing the Work or Derivative Works thereof, You may choose to
++offer, and charge a fee for, acceptance of support, warranty, indemnity, or
++other liability obligations and/or rights consistent with this License. However,
++in accepting such obligations, You may act only on Your own behalf and on Your
++sole responsibility, not on behalf of any other Contributor, and only if You
++agree to indemnify, defend, and hold each Contributor harmless for any liability
++incurred by, or claims asserted against, such Contributor by reason of your
++accepting any such warranty or additional liability.
++
++END OF TERMS AND CONDITIONS
++
++APPENDIX: How to apply the Apache License to your work
++
++To apply the Apache License to your work, attach the following boilerplate
++notice, with the fields enclosed by brackets "[]" replaced with your own
++identifying information. (Don't include the brackets!) The text should be
++enclosed in the appropriate comment syntax for the file format. We also
++recommend that a file or class name and description of purpose be included on
++the same "printed page" as the copyright notice for easier identification within
++third-party archives.
++
++   Copyright [yyyy] [name of copyright owner]
++
++   Licensed under the Apache License, Version 2.0 (the "License");
++   you may not use this file except in compliance with the License.
++   You may obtain a copy of the License at
++
++     http://www.apache.org/licenses/LICENSE-2.0
++
++   Unless required by applicable law or agreed to in writing, software
++   distributed under the License is distributed on an "AS IS" BASIS,
++   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++   See the License for the specific language governing permissions and
++   limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/README.md
----------------------------------------------------------------------
diff --cc external/storm-kafka/README.md
index 0000000,0000000..874db01
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/README.md
@@@ -1,0 -1,0 +1,22 @@@
++storm-kafka-0.8-plus
++====================
++
++Port of storm-kafka to support kafka >= 0.8
++
++##Usage:
++For information on how to use this library in your project see:
++
++[https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus](https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus)
++
++
++##Example Topologies:
++
++[https://github.com/wurstmeister/storm-kafka-0.8-plus-test](https://github.com/wurstmeister/storm-kafka-0.8-plus-test)
++
++##Acknowledgement:
++
++YourKit is kindly supporting this open source project with its full-featured Java Profiler.
++YourKit, LLC is the creator of innovative and intelligent tools for profiling
++Java and .NET applications. Take a look at YourKit's leading software products:
++<a href="http://www.yourkit.com/java/profiler/index.jsp">YourKit Java Profiler</a> and
++<a href="http://www.yourkit.com/.net/profiler/index.jsp">YourKit .NET Profiler</a>.

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/pom.xml
----------------------------------------------------------------------
diff --cc external/storm-kafka/pom.xml
index 0000000,0000000..15743b6
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/pom.xml
@@@ -1,0 -1,0 +1,138 @@@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++ Licensed to the Apache Software Foundation (ASF) under one or more
++ contributor license agreements.  See the NOTICE file distributed with
++ this work for additional information regarding copyright ownership.
++ The ASF licenses this file to You under the Apache License, Version 2.0
++ (the "License"); you may not use this file except in compliance with
++ the License.  You may obtain a copy of the License at
++
++     http://www.apache.org/licenses/LICENSE-2.0
++
++ Unless required by applicable law or agreed to in writing, software
++ distributed under the License is distributed on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ See the License for the specific language governing permissions and
++ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
++    <modelVersion>4.0.0</modelVersion>
++	
++    <parent>
++        <artifactId>storm</artifactId>
++        <groupId>org.apache.storm</groupId>
++        <version>0.9.2-incubating-SNAPSHOT</version>
++        <relativePath>../../pom.xml</relativePath>
++    </parent>
++	
++    <packaging>jar</packaging>
++	<artifactId>storm-kafka</artifactId>
++    <name>storm-kafka</name>
++    <description>Storm Spouts for Apache Kafka</description>
++    <properties>
++        <scalaVersion>2.9.2</scalaVersion>
++        <kafkaArtifact>kafka_2.9.2</kafkaArtifact>
++        <envClassifier></envClassifier>
++    </properties>
++    <build>
++        <plugins>
++
++        </plugins>
++        <sourceDirectory>src/jvm</sourceDirectory>
++        <testSourceDirectory>src/test</testSourceDirectory>
++    </build>
++    <dependencies>
++        <dependency>
++            <groupId>org.mockito</groupId>
++            <artifactId>mockito-all</artifactId>
++            <version>1.9.0</version>
++            <scope>test</scope>
++        </dependency>
++        <dependency>
++            <groupId>org.scala-lang</groupId>
++            <artifactId>scala-library</artifactId>
++            <version>${scalaVersion}</version>
++        </dependency>
++        <dependency>
++            <groupId>junit</groupId>
++            <artifactId>junit</artifactId>
++            <version>4.11</version>
++            <scope>test</scope>
++        </dependency>
++        <dependency>
++            <groupId>com.netflix.curator</groupId>
++            <artifactId>curator-framework</artifactId>
++            <version>1.3.3</version>
++            <exclusions>
++                <exclusion>
++                    <groupId>log4j</groupId>
++                    <artifactId>log4j</artifactId>
++                </exclusion>
++                <exclusion>
++                    <groupId>org.slf4j</groupId>
++                    <artifactId>slf4j-log4j12</artifactId>
++                </exclusion>
++            </exclusions>
++        </dependency>
++        <dependency>
++            <groupId>com.netflix.curator</groupId>
++            <artifactId>curator-recipes</artifactId>
++            <version>1.3.3</version>
++            <exclusions>
++                <exclusion>
++                    <groupId>log4j</groupId>
++                    <artifactId>log4j</artifactId>
++                </exclusion>
++            </exclusions>
++            <scope>test</scope>
++        </dependency>
++        <dependency>
++            <groupId>com.netflix.curator</groupId>
++            <artifactId>curator-test</artifactId>
++            <version>1.3.3</version>
++            <exclusions>
++                <exclusion>
++                    <groupId>log4j</groupId>
++                    <artifactId>log4j</artifactId>
++                </exclusion>
++                <exclusion>
++                    <groupId>org.testng</groupId>
++                    <artifactId>testng</artifactId>
++                </exclusion>
++            </exclusions>
++            <scope>test</scope>
++        </dependency>
++        <dependency>
++            <groupId>org.apache.kafka</groupId>
++            <artifactId>${kafkaArtifact}</artifactId>
++            <version>0.8.0</version>
++            <exclusions>
++                <exclusion>
++                    <groupId>org.apache.zookeeper</groupId>
++                    <artifactId>zookeeper</artifactId>
++                </exclusion>
++                <exclusion>
++                    <groupId>log4j</groupId>
++                    <artifactId>log4j</artifactId>
++                </exclusion>
++            </exclusions>
++        </dependency>
++        <dependency>
++            <groupId>org.apache.storm</groupId>
++            <artifactId>storm-core</artifactId>
++            <version>${project.version}</version>
++            <scope>provided</scope>
++        </dependency>
++    </dependencies>
++    <profiles>
++        <profile>
++            <id>Scala-2.10</id>
++            <properties>
++                <scalaVersion>2.10.3</scalaVersion>
++                <kafkaArtifact>kafka_2.10</kafkaArtifact>
++                <envClassifier>scala_2.10</envClassifier>
++            </properties>
++        </profile>
++    </profiles>
++</project>

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/Broker.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/Broker.java
index 0000000,0000000..2451eee
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/Broker.java
@@@ -1,0 -1,0 +1,63 @@@
++package storm.kafka;
++
++import java.io.Serializable;
++import com.google.common.base.Objects;
++
++public class Broker implements Serializable, Comparable<Broker> {
++    public final String host;
++    public final int port;
++
++    public Broker(String host, int port) {
++        this.host = host;
++        this.port = port;
++    }
++
++    public Broker(String host) {
++        this(host, 9092);
++    }
++
++    @Override
++    public int hashCode() {
++        return Objects.hashCode(host, port);
++    }
++
++    @Override
++    public boolean equals(Object obj) {
++        if (this == obj) {
++            return true;
++        }
++        if (obj == null || getClass() != obj.getClass()) {
++            return false;
++        }
++        final Broker other = (Broker) obj;
++        return Objects.equal(this.host, other.host) && Objects.equal(this.port, other.port);
++    }
++
++    @Override
++    public String toString() {
++        return host + ":" + port;
++    }
++
++    public static Broker fromString(String host) {
++        Broker hp;
++        String[] spec = host.split(":");
++        if (spec.length == 1) {
++            hp = new Broker(spec[0]);
++        } else if (spec.length == 2) {
++            hp = new Broker(spec[0], Integer.parseInt(spec[1]));
++        } else {
++            throw new IllegalArgumentException("Invalid host specification: " + host);
++        }
++        return hp;
++    }
++
++
++    @Override
++    public int compareTo(Broker o) {
++        if (this.host.equals(o.host)) {
++            return this.port - o.port;
++        } else {
++            return this.host.compareTo(o.host);
++        }
++    }
++}
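
A quick illustration of the Broker API added above (a hypothetical snippet, not part of the commit): fromString accepts either "host" or "host:port", and the Comparable ordering is by host first, then port.

    import java.util.TreeSet;
    import storm.kafka.Broker;

    public class BrokerExample {
        public static void main(String[] args) {
            Broker implicitPort = Broker.fromString("kafka1");       // port defaults to 9092
            Broker explicitPort = Broker.fromString("kafka1:9093");
            TreeSet<Broker> ordered = new TreeSet<Broker>();         // works because Broker is Comparable
            ordered.add(explicitPort);
            ordered.add(implicitPort);
            System.out.println(ordered.first());                     // prints kafka1:9092
        }
    }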

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
index 0000000,0000000..12ef7b1
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
@@@ -1,0 -1,0 +1,11 @@@
++package storm.kafka;
++
++import java.io.Serializable;
++
++/**
++ * Date: 11/05/2013
++ * Time: 14:40
++ */
++public interface BrokerHosts extends Serializable {
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
index 0000000,0000000..cd751fe
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
@@@ -1,0 -1,0 +1,124 @@@
++package storm.kafka;
++
++import backtype.storm.Config;
++import backtype.storm.utils.Utils;
++import com.netflix.curator.framework.CuratorFramework;
++import com.netflix.curator.framework.CuratorFrameworkFactory;
++import com.netflix.curator.retry.RetryNTimes;
++import org.json.simple.JSONValue;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.trident.GlobalPartitionInformation;
++
++import java.io.IOException;
++import java.io.UnsupportedEncodingException;
++import java.util.List;
++import java.util.Map;
++
++public class DynamicBrokersReader {
++
++    public static final Logger LOG = LoggerFactory.getLogger(DynamicBrokersReader.class);
++
++    private CuratorFramework _curator;
++    private String _zkPath;
++    private String _topic;
++
++    public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
++        _zkPath = zkPath;
++        _topic = topic;
++        _curator = CuratorFrameworkFactory.newClient(
++                zkStr,
++                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
++                15000,
++                new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
++                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
++        _curator.start();
++    }
++
++    /**
++     * Get all partitions with their current leaders
++     */
++    public GlobalPartitionInformation getBrokerInfo() {
++        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
++        try {
++            int numPartitionsForTopic = getNumPartitions();
++            String brokerInfoPath = brokerPath();
++            for (int partition = 0; partition < numPartitionsForTopic; partition++) {
++                int leader = getLeaderFor(partition);
++                String path = brokerInfoPath + "/" + leader;
++                try {
++                    byte[] brokerData = _curator.getData().forPath(path);
++                    Broker hp = getBrokerHost(brokerData);
++                    globalPartitionInformation.addPartition(partition, hp);
++                } catch (org.apache.zookeeper.KeeperException.NoNodeException e) {
++                    LOG.error("Node {} does not exist ", path);
++                }
++            }
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++        LOG.info("Read partition info from zookeeper: " + globalPartitionInformation);
++        return globalPartitionInformation;
++    }
++
++
++    private int getNumPartitions() {
++        try {
++            String topicBrokersPath = partitionPath();
++            List<String> children = _curator.getChildren().forPath(topicBrokersPath);
++            return children.size();
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++    public String partitionPath() {
++        return _zkPath + "/topics/" + _topic + "/partitions";
++    }
++
++    public String brokerPath() {
++        return _zkPath + "/ids";
++    }
++
++    /**
++     * get /brokers/topics/distributedTopic/partitions/1/state
++     * { "controller_epoch":4, "isr":[ 1, 0 ], "leader":1, "leader_epoch":1, "version":1 }
++     *
++     * @param partition
++     * @return
++     */
++    private int getLeaderFor(long partition) {
++        try {
++            String topicBrokersPath = partitionPath();
++            byte[] hostPortData = _curator.getData().forPath(topicBrokersPath + "/" + partition + "/state");
++            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(hostPortData, "UTF-8"));
++            Integer leader = ((Number) value.get("leader")).intValue();
++            return leader;
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++    public void close() {
++        _curator.close();
++    }
++
++    /**
++     * [zk: localhost:2181(CONNECTED) 56] get /brokers/ids/0
++     * { "host":"localhost", "jmx_port":9999, "port":9092, "version":1 }
++     *
++     * @param contents
++     * @return
++     */
++    private Broker getBrokerHost(byte[] contents) {
++        try {
++            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(contents, "UTF-8"));
++            String host = (String) value.get("host");
++            Integer port = ((Long) value.get("port")).intValue();
++            return new Broker(host, port);
++        } catch (UnsupportedEncodingException e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++}
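
A minimal usage sketch for the reader above (hostnames and the topic name are hypothetical; the Storm config entries must carry the integer timeout/retry values the constructor reads):

    import java.util.HashMap;
    import java.util.Map;
    import backtype.storm.Config;
    import storm.kafka.DynamicBrokersReader;
    import storm.kafka.trident.GlobalPartitionInformation;

    public class BrokerReaderExample {
        public static void main(String[] args) {
            Map conf = new HashMap();
            conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 5);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 1000);
            // leaders come from <zkPath>/topics/<topic>/partitions, broker hosts from <zkPath>/ids
            DynamicBrokersReader reader = new DynamicBrokersReader(conf, "localhost:2181", "/brokers", "distributedTopic");
            try {
                GlobalPartitionInformation partitions = reader.getBrokerInfo();
                System.out.println(partitions);
            } finally {
                reader.close();
            }
        }
    }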

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
index 0000000,0000000..8d0115b
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
@@@ -1,0 -1,0 +1,77 @@@
++package storm.kafka;
++
++import kafka.javaapi.consumer.SimpleConsumer;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.trident.IBrokerReader;
++
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.Map;
++import java.util.Set;
++
++
++public class DynamicPartitionConnections {
++
++    public static final Logger LOG = LoggerFactory.getLogger(DynamicPartitionConnections.class);
++
++    static class ConnectionInfo {
++        SimpleConsumer consumer;
++        Set<Integer> partitions = new HashSet();
++
++        public ConnectionInfo(SimpleConsumer consumer) {
++            this.consumer = consumer;
++        }
++    }
++
++    Map<Broker, ConnectionInfo> _connections = new HashMap();
++    KafkaConfig _config;
++    IBrokerReader _reader;
++
++    public DynamicPartitionConnections(KafkaConfig config, IBrokerReader brokerReader) {
++        _config = config;
++        _reader = brokerReader;
++    }
++
++    public SimpleConsumer register(Partition partition) {
++        Broker broker = _reader.getCurrentBrokers().getBrokerFor(partition.partition);
++        return register(broker, partition.partition);
++    }
++
++    public SimpleConsumer register(Broker host, int partition) {
++        if (!_connections.containsKey(host)) {
++            _connections.put(host, new ConnectionInfo(new SimpleConsumer(host.host, host.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId)));
++        }
++        ConnectionInfo info = _connections.get(host);
++        info.partitions.add(partition);
++        return info.consumer;
++    }
++
++    public SimpleConsumer getConnection(Partition partition) {
++        ConnectionInfo info = _connections.get(partition.host);
++        if (info != null) {
++            return info.consumer;
++        }
++        return null;
++    }
++
++    public void unregister(Broker port, int partition) {
++        ConnectionInfo info = _connections.get(port);
++        info.partitions.remove(partition);
++        if (info.partitions.isEmpty()) {
++            info.consumer.close();
++            _connections.remove(port);
++        }
++    }
++
++    public void unregister(Partition partition) {
++        unregister(partition.host, partition.partition);
++    }
++
++    public void clear() {
++        for (ConnectionInfo info : _connections.values()) {
++            info.consumer.close();
++        }
++        _connections.clear();
++    }
++}
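
To make the register/unregister lifecycle above concrete, a hedged sketch (the KafkaConfig and IBrokerReader wiring is assumed to exist already; the spout and Trident emitter are the real callers):

    import kafka.javaapi.consumer.SimpleConsumer;
    import storm.kafka.Broker;
    import storm.kafka.DynamicPartitionConnections;

    public class ConnectionsExample {
        static void useSharedConsumer(DynamicPartitionConnections connections) {
            Broker leader = new Broker("kafka1", 9092);
            SimpleConsumer consumer = connections.register(leader, 0); // one consumer per broker, shared across partitions
            // ... issue fetch requests for partition 0 through consumer ...
            connections.unregister(leader, 0);  // the consumer is closed once it serves no partitions
        }
    }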

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
index 0000000,0000000..0bd1123
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
@@@ -1,0 -1,0 +1,12 @@@
++package storm.kafka;
++
++public class FailedFetchException extends RuntimeException {
++
++    public FailedFetchException(String message) {
++        super(message);
++    }
++
++    public FailedFetchException(Exception e) {
++        super(e);
++    }
++}
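
FailedFetchException is unchecked, so callers that want to survive a broker failure must catch it explicitly. A sketch of that pattern (fetchBatch is a hypothetical stand-in for a SimpleConsumer fetch call):

    import storm.kafka.FailedFetchException;

    public class FetchRetryExample {
        static void fetchWithRecovery(Runnable fetchBatch) {
            try {
                fetchBatch.run();
            } catch (FailedFetchException e) {
                // the real spout reacts by refreshing partition metadata and re-registering consumers
                System.err.println("fetch failed, refreshing brokers: " + e.getMessage());
            }
        }
    }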

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
index 0000000,0000000..8ef2a88
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
@@@ -1,0 -1,0 +1,33 @@@
++package storm.kafka;
++
++import backtype.storm.spout.MultiScheme;
++import backtype.storm.spout.RawMultiScheme;
++
++import java.io.Serializable;
++
++public class KafkaConfig implements Serializable {
++
++    public final BrokerHosts hosts;
++    public final String topic;
++    public final String clientId;
++
++    public int fetchSizeBytes = 1024 * 1024;
++    public int socketTimeoutMs = 10000;
++    public int bufferSizeBytes = 1024 * 1024;
++    public MultiScheme scheme = new RawMultiScheme();
++    public boolean forceFromStart = false;
++    public long startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
++    public boolean useStartOffsetTimeIfOffsetOutOfRange = true;
++    public int metricsTimeBucketSizeInSecs = 60;
++
++    public KafkaConfig(BrokerHosts hosts, String topic) {
++        this(hosts, topic, kafka.api.OffsetRequest.DefaultClientId());
++    }
++
++    public KafkaConfig(BrokerHosts hosts, String topic, String clientId) {
++        this.hosts = hosts;
++        this.topic = topic;
++        this.clientId = clientId;
++    }
++
++}
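
All tuning knobs above are public fields, so configuration is done by
assignment after construction. A sketch (the ZooKeeper address and topic
name are placeholders; ZkHosts is introduced later in this diff):

    BrokerHosts hosts = new ZkHosts("zk1.example.com:2181");
    KafkaConfig config = new KafkaConfig(hosts, "events");
    config.fetchSizeBytes = 2 * 1024 * 1024;  // fetch up to 2 MB per request
    config.forceFromStart = true;             // start from startOffsetTime instead of the latest offset
    config.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();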

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
index 0000000,0000000..a67335c
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
@@@ -1,0 -1,0 +1,30 @@@
++package storm.kafka;
++
++/**
++ * Date: 11/01/2014
++ * Time: 14:21
++ */
++public enum KafkaError {
++    NO_ERROR,
++    OFFSET_OUT_OF_RANGE,
++    INVALID_MESSAGE,
++    UNKNOWN_TOPIC_OR_PARTITION,
++    INVALID_FETCH_SIZE,
++    LEADER_NOT_AVAILABLE,
++    NOT_LEADER_FOR_PARTITION,
++    REQUEST_TIMED_OUT,
++    BROKER_NOT_AVAILABLE,
++    REPLICA_NOT_AVAILABLE,
++    MESSAGE_SIZE_TOO_LARGE,
++    STALE_CONTROLLER_EPOCH,
++    OFFSET_METADATA_TOO_LARGE,
++    UNKNOWN;
++
++    public static KafkaError getError(int errorCode) {
++        if (errorCode < 0 || errorCode >= UNKNOWN.ordinal()) {
++            return UNKNOWN;
++        } else {
++            return values()[errorCode];
++        }
++    }
++}
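
getError relies on the enum ordinals lining up with Kafka's numeric error
codes, so any code outside the known range collapses to UNKNOWN. A quick
sketch:

    KafkaError a = KafkaError.getError(1);   // OFFSET_OUT_OF_RANGE (ordinal 1)
    KafkaError b = KafkaError.getError(99);  // UNKNOWN: beyond the mapped range
    KafkaError c = KafkaError.getError(-1);  // UNKNOWN: negative codes are rejected too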

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
index 0000000,0000000..79e33fe
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
@@@ -1,0 -1,0 +1,173 @@@
++package storm.kafka;
++
++import backtype.storm.Config;
++import backtype.storm.metric.api.IMetric;
++import backtype.storm.spout.SpoutOutputCollector;
++import backtype.storm.task.TopologyContext;
++import backtype.storm.topology.OutputFieldsDeclarer;
++import backtype.storm.topology.base.BaseRichSpout;
++import kafka.message.Message;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.PartitionManager.KafkaMessageId;
++
++import java.util.*;
++
++// TODO: need to add blacklisting
++// TODO: need to make a best effort to not re-emit messages if we don't have to
++public class KafkaSpout extends BaseRichSpout {
++    public static class MessageAndRealOffset {
++        public Message msg;
++        public long offset;
++
++        public MessageAndRealOffset(Message msg, long offset) {
++            this.msg = msg;
++            this.offset = offset;
++        }
++    }
++
++    static enum EmitState {
++        EMITTED_MORE_LEFT,
++        EMITTED_END,
++        NO_EMITTED
++    }
++
++    public static final Logger LOG = LoggerFactory.getLogger(KafkaSpout.class);
++
++    String _uuid = UUID.randomUUID().toString();
++    SpoutConfig _spoutConfig;
++    SpoutOutputCollector _collector;
++    PartitionCoordinator _coordinator;
++    DynamicPartitionConnections _connections;
++    ZkState _state;
++
++    long _lastUpdateMs = 0;
++
++    int _currPartitionIndex = 0;
++
++    public KafkaSpout(SpoutConfig spoutConf) {
++        _spoutConfig = spoutConf;
++    }
++
++    @Override
++    public void open(Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
++        _collector = collector;
++
++        Map stateConf = new HashMap(conf);
++        List<String> zkServers = _spoutConfig.zkServers;
++        if (zkServers == null) {
++            zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
++        }
++        Integer zkPort = _spoutConfig.zkPort;
++        if (zkPort == null) {
++            zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
++        }
++        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, zkServers);
++        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, zkPort);
++        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, _spoutConfig.zkRoot);
++        _state = new ZkState(stateConf);
++
++        _connections = new DynamicPartitionConnections(_spoutConfig, KafkaUtils.makeBrokerReader(conf, _spoutConfig));
++
++        // using TransactionalState like this is a hack
++        int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
++        if (_spoutConfig.hosts instanceof StaticHosts) {
++            _coordinator = new StaticCoordinator(_connections, conf, _spoutConfig, _state, context.getThisTaskIndex(), totalTasks, _uuid);
++        } else {
++            _coordinator = new ZkCoordinator(_connections, conf, _spoutConfig, _state, context.getThisTaskIndex(), totalTasks, _uuid);
++        }
++
++        context.registerMetric("kafkaOffset", new IMetric() {
++            KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_spoutConfig.topic, _connections);
++
++            @Override
++            public Object getValueAndReset() {
++                List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
++                Set<Partition> latestPartitions = new HashSet<Partition>();
++                for (PartitionManager pm : pms) {
++                    latestPartitions.add(pm.getPartition());
++                }
++                _kafkaOffsetMetric.refreshPartitions(latestPartitions);
++                for (PartitionManager pm : pms) {
++                    _kafkaOffsetMetric.setLatestEmittedOffset(pm.getPartition(), pm.lastCompletedOffset());
++                }
++                return _kafkaOffsetMetric.getValueAndReset();
++            }
++        }, _spoutConfig.metricsTimeBucketSizeInSecs);
++
++        context.registerMetric("kafkaPartition", new IMetric() {
++            @Override
++            public Object getValueAndReset() {
++                List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
++                Map concatMetricsDataMaps = new HashMap();
++                for (PartitionManager pm : pms) {
++                    concatMetricsDataMaps.putAll(pm.getMetricsDataMap());
++                }
++                return concatMetricsDataMaps;
++            }
++        }, _spoutConfig.metricsTimeBucketSizeInSecs);
++    }
++
++    @Override
++    public void close() {
++        _state.close();
++    }
++
++    @Override
++    public void nextTuple() {
++        List<PartitionManager> managers = _coordinator.getMyManagedPartitions();
++        for (int i = 0; i < managers.size(); i++) {
++
++            // in case the number of managers decreased
++            _currPartitionIndex = _currPartitionIndex % managers.size();
++            EmitState state = managers.get(_currPartitionIndex).next(_collector);
++            if (state != EmitState.EMITTED_MORE_LEFT) {
++                _currPartitionIndex = (_currPartitionIndex + 1) % managers.size();
++            }
++            if (state != EmitState.NO_EMITTED) {
++                break;
++            }
++        }
++
++        long now = System.currentTimeMillis();
++        if ((now - _lastUpdateMs) > _spoutConfig.stateUpdateIntervalMs) {
++            commit();
++        }
++    }
++
++    @Override
++    public void ack(Object msgId) {
++        KafkaMessageId id = (KafkaMessageId) msgId;
++        PartitionManager m = _coordinator.getManager(id.partition);
++        if (m != null) {
++            m.ack(id.offset);
++        }
++    }
++
++    @Override
++    public void fail(Object msgId) {
++        KafkaMessageId id = (KafkaMessageId) msgId;
++        PartitionManager m = _coordinator.getManager(id.partition);
++        if (m != null) {
++            m.fail(id.offset);
++        }
++    }
++
++    @Override
++    public void deactivate() {
++        commit();
++    }
++
++    @Override
++    public void declareOutputFields(OutputFieldsDeclarer declarer) {
++        declarer.declare(_spoutConfig.scheme.getOutputFields());
++    }
++
++    private void commit() {
++        _lastUpdateMs = System.currentTimeMillis();
++        for (PartitionManager manager : _coordinator.getMyManagedPartitions()) {
++            manager.commit();
++        }
++    }
++
++}
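
A minimal wiring sketch, assuming the SpoutConfig and ZkHosts classes
introduced later in this commit; the addresses, component ids, and the
downstream PrinterBolt are placeholders:

    BrokerHosts hosts = new ZkHosts("zk1.example.com:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "events", "/kafkastorm", "event-reader");
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("kafka-spout", new KafkaSpout(spoutConfig), 4); // 4 spout tasks
    builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("kafka-spout");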

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
index 0000000,0000000..0e7f601
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
@@@ -1,0 -1,0 +1,218 @@@
++package storm.kafka;
++
++import backtype.storm.metric.api.IMetric;
++import backtype.storm.utils.Utils;
++import com.google.common.base.Preconditions;
++import kafka.api.FetchRequest;
++import kafka.api.FetchRequestBuilder;
++import kafka.api.PartitionOffsetRequestInfo;
++import kafka.common.TopicAndPartition;
++import kafka.javaapi.FetchResponse;
++import kafka.javaapi.OffsetRequest;
++import kafka.javaapi.consumer.SimpleConsumer;
++import kafka.javaapi.message.ByteBufferMessageSet;
++import kafka.message.Message;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.trident.GlobalPartitionInformation;
++import storm.kafka.trident.IBrokerReader;
++import storm.kafka.trident.StaticBrokerReader;
++import storm.kafka.trident.ZkBrokerReader;
++
++import java.net.ConnectException;
++import java.nio.ByteBuffer;
++import java.util.*;
++
++
++public class KafkaUtils {
++
++    public static final Logger LOG = LoggerFactory.getLogger(KafkaUtils.class);
++    private static final int NO_OFFSET = -5;
++
++
++    public static IBrokerReader makeBrokerReader(Map stormConf, KafkaConfig conf) {
++        if (conf.hosts instanceof StaticHosts) {
++            return new StaticBrokerReader(((StaticHosts) conf.hosts).getPartitionInformation());
++        } else {
++            return new ZkBrokerReader(stormConf, conf.topic, (ZkHosts) conf.hosts);
++        }
++    }
++
++
++    public static long getOffset(SimpleConsumer consumer, String topic, int partition, KafkaConfig config) {
++        long startOffsetTime = kafka.api.OffsetRequest.LatestTime();
++        if (config.forceFromStart) {
++            startOffsetTime = config.startOffsetTime;
++        }
++        return getOffset(consumer, topic, partition, startOffsetTime);
++    }
++
++    public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
++        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
++        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
++        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
++        OffsetRequest request = new OffsetRequest(
++                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
++
++        long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
++        if (offsets.length > 0) {
++            return offsets[0];
++        } else {
++            return NO_OFFSET;
++        }
++    }
++
++    public static class KafkaOffsetMetric implements IMetric {
++        Map<Partition, Long> _partitionToOffset = new HashMap<Partition, Long>();
++        Set<Partition> _partitions;
++        String _topic;
++        DynamicPartitionConnections _connections;
++
++        public KafkaOffsetMetric(String topic, DynamicPartitionConnections connections) {
++            _topic = topic;
++            _connections = connections;
++        }
++
++        public void setLatestEmittedOffset(Partition partition, long offset) {
++            _partitionToOffset.put(partition, offset);
++        }
++
++        @Override
++        public Object getValueAndReset() {
++            try {
++                long totalSpoutLag = 0;
++                long totalEarliestTimeOffset = 0;
++                long totalLatestTimeOffset = 0;
++                long totalLatestEmittedOffset = 0;
++                HashMap<String, Long> ret = new HashMap<String, Long>();
++                if (_partitions != null && _partitions.size() == _partitionToOffset.size()) {
++                    for (Map.Entry<Partition, Long> e : _partitionToOffset.entrySet()) {
++                        Partition partition = e.getKey();
++                        SimpleConsumer consumer = _connections.getConnection(partition);
++                        if (consumer == null) {
++                            LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
++                            return null;
++                        }
++                        long earliestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.EarliestTime());
++                        long latestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
++                        if (earliestTimeOffset == 0 || latestTimeOffset == 0) {
++                            LOG.warn("No data found in Kafka Partition " + partition.getId());
++                            return null;
++                        }
++                        long latestEmittedOffset = e.getValue();
++                        long spoutLag = latestTimeOffset - latestEmittedOffset;
++                        ret.put(partition.getId() + "/" + "spoutLag", spoutLag);
++                        ret.put(partition.getId() + "/" + "earliestTimeOffset", earliestTimeOffset);
++                        ret.put(partition.getId() + "/" + "latestTimeOffset", latestTimeOffset);
++                        ret.put(partition.getId() + "/" + "latestEmittedOffset", latestEmittedOffset);
++                        totalSpoutLag += spoutLag;
++                        totalEarliestTimeOffset += earliestTimeOffset;
++                        totalLatestTimeOffset += latestTimeOffset;
++                        totalLatestEmittedOffset += latestEmittedOffset;
++                    }
++                    ret.put("totalSpoutLag", totalSpoutLag);
++                    ret.put("totalEarliestTimeOffset", totalEarliestTimeOffset);
++                    ret.put("totalLatestTimeOffset", totalLatestTimeOffset);
++                    ret.put("totalLatestEmittedOffset", totalLatestEmittedOffset);
++                    return ret;
++                } else {
++                    LOG.info("Metrics Tick: Not enough data to calculate spout lag.");
++                }
++            } catch (Throwable t) {
++                LOG.warn("Metrics Tick: Exception when computing kafkaOffset metric.", t);
++            }
++            return null;
++        }
++
++        public void refreshPartitions(Set<Partition> partitions) {
++            _partitions = partitions;
++            Iterator<Partition> it = _partitionToOffset.keySet().iterator();
++            while (it.hasNext()) {
++                if (!partitions.contains(it.next())) {
++                    it.remove();
++                }
++            }
++        }
++    }
++
++    public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
++        ByteBufferMessageSet msgs = null;
++        String topic = config.topic;
++        int partitionId = partition.partition;
++        for (int errors = 0; errors < 2 && msgs == null; errors++) {
++            FetchRequestBuilder builder = new FetchRequestBuilder();
++            FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
++                    clientId(config.clientId).build();
++            FetchResponse fetchResponse;
++            try {
++                fetchResponse = consumer.fetch(fetchRequest);
++            } catch (Exception e) {
++                if (e instanceof ConnectException) {
++                    throw new FailedFetchException(e);
++                } else {
++                    throw new RuntimeException(e);
++                }
++            }
++            if (fetchResponse.hasError()) {
++                KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
++                if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
++                    long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
++                    LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
++                            "retrying with the start offset time from the configuration. " +
++                            "Configured start offset time: [" + config.startOffsetTime + "]; new offset: [" + startOffset + "]");
++                    offset = startOffset;
++                } else {
++                    String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
++                    LOG.error(message);
++                    throw new FailedFetchException(message);
++                }
++            } else {
++                msgs = fetchResponse.messageSet(topic, partitionId);
++            }
++        }
++        return msgs;
++    }
++
++
++    public static Iterable<List<Object>> generateTuples(KafkaConfig kafkaConfig, Message msg) {
++        Iterable<List<Object>> tups;
++        ByteBuffer payload = msg.payload();
++        ByteBuffer key = msg.key();
++        if (key != null && kafkaConfig.scheme instanceof KeyValueSchemeAsMultiScheme) {
++            tups = ((KeyValueSchemeAsMultiScheme) kafkaConfig.scheme).deserializeKeyAndValue(Utils.toByteArray(key), Utils.toByteArray(payload));
++        } else {
++            tups = kafkaConfig.scheme.deserialize(Utils.toByteArray(payload));
++        }
++        return tups;
++    }
++
++
++    public static List<Partition> calculatePartitionsForTask(GlobalPartitionInformation partitionInformation, int totalTasks, int taskIndex) {
++        Preconditions.checkArgument(taskIndex < totalTasks, "task index must be less than total tasks");
++        List<Partition> partitions = partitionInformation.getOrderedPartitions();
++        int numPartitions = partitions.size();
++        if (numPartitions < totalTasks) {
++            LOG.warn("there are more tasks than partitions (tasks: " + totalTasks + "; partitions: " + numPartitions + "), some tasks will be idle");
++        }
++        List<Partition> taskPartitions = new ArrayList<Partition>();
++        for (int i = taskIndex; i < numPartitions; i += totalTasks) {
++            Partition taskPartition = partitions.get(i);
++            taskPartitions.add(taskPartition);
++        }
++        logPartitionMapping(totalTasks, taskIndex, taskPartitions);
++        return taskPartitions;
++    }
++
++    private static void logPartitionMapping(int totalTasks, int taskIndex, List<Partition> taskPartitions) {
++        String taskPrefix = taskId(taskIndex, totalTasks);
++        if (taskPartitions.isEmpty()) {
++            LOG.warn(taskPrefix + "no partitions assigned");
++        } else {
++            LOG.info(taskPrefix + "assigned " + taskPartitions);
++        }
++    }
++
++    public static String taskId(int taskIndex, int totalTasks) {
++        return "Task [" + (taskIndex + 1) + "/" + totalTasks + "] ";
++    }
++}
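
calculatePartitionsForTask strides through the ordered partition list by
totalTasks, so assignment is a simple round-robin. For example, with 8
partitions and 3 tasks:

    // task 0 -> partitions 0, 3, 6
    // task 1 -> partitions 1, 4, 7
    // task 2 -> partitions 2, 5
    List<Partition> mine = KafkaUtils.calculatePartitionsForTask(partitionInfo, 3, 1);
    // `partitionInfo` is assumed to be a populated GlobalPartitionInformation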

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
index 0000000,0000000..df31cb8
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
@@@ -1,0 -1,0 +1,11 @@@
++package storm.kafka;
++
++import backtype.storm.spout.Scheme;
++
++import java.util.List;
++
++public interface KeyValueScheme extends Scheme {
++
++    public List<Object> deserializeKeyAndValue(byte[] key, byte[] value);
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
index 0000000,0000000..2412a1c
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
@@@ -1,0 -1,0 +1,19 @@@
++package storm.kafka;
++
++import backtype.storm.spout.SchemeAsMultiScheme;
++import java.util.Arrays;
++import java.util.List;
++
++public class KeyValueSchemeAsMultiScheme extends SchemeAsMultiScheme {
++
++    public KeyValueSchemeAsMultiScheme(KeyValueScheme scheme) {
++        super(scheme);
++    }
++
++    public Iterable<List<Object>> deserializeKeyAndValue(final byte[] key, final byte[] value) {
++        List<Object> o = ((KeyValueScheme)scheme).deserializeKeyAndValue(key, value);
++        if (o == null) return null;
++        else return Arrays.asList(o);
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/Partition.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/Partition.java
index 0000000,0000000..96a3ad7
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/Partition.java
@@@ -1,0 -1,0 +1,47 @@@
++package storm.kafka;
++
++import com.google.common.base.Objects;
++import storm.trident.spout.ISpoutPartition;
++
++
++public class Partition implements ISpoutPartition {
++
++    public final Broker host;
++    public final int partition;
++
++    public Partition(Broker host, int partition) {
++        this.host = host;
++        this.partition = partition;
++    }
++
++    @Override
++    public int hashCode() {
++        return Objects.hashCode(host, partition);
++    }
++
++    @Override
++    public boolean equals(Object obj) {
++        if (this == obj) {
++            return true;
++        }
++        if (obj == null || getClass() != obj.getClass()) {
++            return false;
++        }
++        final Partition other = (Partition) obj;
++        return Objects.equal(this.host, other.host) && Objects.equal(this.partition, other.partition);
++    }
++
++    @Override
++    public String toString() {
++        return "Partition{" +
++                "host=" + host +
++                ", partition=" + partition +
++                '}';
++    }
++
++    @Override
++    public String getId() {
++        return "partition_" + partition;
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
index 0000000,0000000..d28248d
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
@@@ -1,0 -1,0 +1,9 @@@
++package storm.kafka;
++
++import java.util.List;
++
++public interface PartitionCoordinator {
++    List<PartitionManager> getMyManagedPartitions();
++
++    PartitionManager getManager(Partition partition);
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
index 0000000,0000000..03075bb
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
@@@ -1,0 -1,0 +1,224 @@@
++package storm.kafka;
++
++import backtype.storm.Config;
++import backtype.storm.metric.api.CombinedMetric;
++import backtype.storm.metric.api.CountMetric;
++import backtype.storm.metric.api.MeanReducer;
++import backtype.storm.metric.api.ReducedMetric;
++import backtype.storm.spout.SpoutOutputCollector;
++import com.google.common.collect.ImmutableMap;
++import kafka.api.OffsetRequest;
++import kafka.javaapi.consumer.SimpleConsumer;
++import kafka.javaapi.message.ByteBufferMessageSet;
++import kafka.message.MessageAndOffset;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.KafkaSpout.EmitState;
++import storm.kafka.KafkaSpout.MessageAndRealOffset;
++import storm.kafka.trident.MaxMetric;
++
++import java.util.*;
++
++public class PartitionManager {
++    public static final Logger LOG = LoggerFactory.getLogger(PartitionManager.class);
++    private final CombinedMetric _fetchAPILatencyMax;
++    private final ReducedMetric _fetchAPILatencyMean;
++    private final CountMetric _fetchAPICallCount;
++    private final CountMetric _fetchAPIMessageCount;
++
++    static class KafkaMessageId {
++        public Partition partition;
++        public long offset;
++
++        public KafkaMessageId(Partition partition, long offset) {
++            this.partition = partition;
++            this.offset = offset;
++        }
++    }
++
++    Long _emittedToOffset;
++    SortedSet<Long> _pending = new TreeSet<Long>();
++    Long _committedTo;
++    LinkedList<MessageAndRealOffset> _waitingToEmit = new LinkedList<MessageAndRealOffset>();
++    Partition _partition;
++    SpoutConfig _spoutConfig;
++    String _topologyInstanceId;
++    SimpleConsumer _consumer;
++    DynamicPartitionConnections _connections;
++    ZkState _state;
++    Map _stormConf;
++
++
++    public PartitionManager(DynamicPartitionConnections connections, String topologyInstanceId, ZkState state, Map stormConf, SpoutConfig spoutConfig, Partition id) {
++        _partition = id;
++        _connections = connections;
++        _spoutConfig = spoutConfig;
++        _topologyInstanceId = topologyInstanceId;
++        _consumer = connections.register(id.host, id.partition);
++        _state = state;
++        _stormConf = stormConf;
++
++        String jsonTopologyId = null;
++        Long jsonOffset = null;
++        String path = committedPath();
++        try {
++            Map<Object, Object> json = _state.readJSON(path);
++            LOG.info("Read partition information from: " + path + " --> " + json);
++            if (json != null) {
++                jsonTopologyId = (String) ((Map<Object, Object>) json.get("topology")).get("id");
++                jsonOffset = (Long) json.get("offset");
++            }
++        } catch (Throwable e) {
++            LOG.warn("Error reading and/or parsing at ZkNode: " + path, e);
++        }
++
++        if (jsonTopologyId == null || jsonOffset == null) { // failed to parse JSON?
++            _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition, spoutConfig);
++            LOG.info("No partition information found, using configuration to determine offset");
++        } else if (!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.forceFromStart) {
++            _committedTo = KafkaUtils.getOffset(_consumer, spoutConfig.topic, id.partition, spoutConfig.startOffsetTime);
++            LOG.info("Topology change detected and reset from start forced, using configuration to determine offset");
++        } else {
++            _committedTo = jsonOffset;
++            LOG.info("Read last commit offset from zookeeper: " + _committedTo + "; old topology_id: " + jsonTopologyId + " - new topology_id: " + topologyInstanceId);
++        }
++
++        LOG.info("Starting " + _partition + " from offset " + _committedTo);
++        _emittedToOffset = _committedTo;
++
++        _fetchAPILatencyMax = new CombinedMetric(new MaxMetric());
++        _fetchAPILatencyMean = new ReducedMetric(new MeanReducer());
++        _fetchAPICallCount = new CountMetric();
++        _fetchAPIMessageCount = new CountMetric();
++    }
++
++    public Map getMetricsDataMap() {
++        Map ret = new HashMap();
++        ret.put(_partition + "/fetchAPILatencyMax", _fetchAPILatencyMax.getValueAndReset());
++        ret.put(_partition + "/fetchAPILatencyMean", _fetchAPILatencyMean.getValueAndReset());
++        ret.put(_partition + "/fetchAPICallCount", _fetchAPICallCount.getValueAndReset());
++        ret.put(_partition + "/fetchAPIMessageCount", _fetchAPIMessageCount.getValueAndReset());
++        return ret;
++    }
++
++    // returns EmitState.NO_EMITTED if it has reached the end of the current batch
++    public EmitState next(SpoutOutputCollector collector) {
++        if (_waitingToEmit.isEmpty()) {
++            fill();
++        }
++        while (true) {
++            MessageAndRealOffset toEmit = _waitingToEmit.pollFirst();
++            if (toEmit == null) {
++                return EmitState.NO_EMITTED;
++            }
++            Iterable<List<Object>> tups = KafkaUtils.generateTuples(_spoutConfig, toEmit.msg);
++            if (tups != null) {
++                for (List<Object> tup : tups) {
++                    collector.emit(tup, new KafkaMessageId(_partition, toEmit.offset));
++                }
++                break;
++            } else {
++                ack(toEmit.offset);
++            }
++        }
++        if (!_waitingToEmit.isEmpty()) {
++            return EmitState.EMITTED_MORE_LEFT;
++        } else {
++            return EmitState.EMITTED_END;
++        }
++    }
++
++    private void fill() {
++        long start = System.nanoTime();
++        ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, _emittedToOffset);
++        long end = System.nanoTime();
++        long millis = (end - start) / 1000000;
++        _fetchAPILatencyMax.update(millis);
++        _fetchAPILatencyMean.update(millis);
++        _fetchAPICallCount.incr();
++        int numMessages = countMessages(msgs);
++        _fetchAPIMessageCount.incrBy(numMessages);
++
++        if (numMessages > 0) {
++            LOG.info("Fetched " + numMessages + " messages from: " + _partition);
++        }
++        for (MessageAndOffset msg : msgs) {
++            _pending.add(_emittedToOffset);
++            _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
++            _emittedToOffset = msg.nextOffset();
++        }
++        if (numMessages > 0) {
++            LOG.info("Added " + numMessages + " messages from: " + _partition + " to internal buffers");
++        }
++    }
++
++    private int countMessages(ByteBufferMessageSet messageSet) {
++        int counter = 0;
++        for (MessageAndOffset messageAndOffset : messageSet) {
++            counter = counter + 1;
++        }
++        return counter;
++    }
++
++    public void ack(Long offset) {
++        _pending.remove(offset);
++    }
++
++    public void fail(Long offset) {
++        // TODO: should this use an in-memory ack set to skip anything that's been acked but not committed?
++        // Things might get messy with lots of timeouts.
++        if (_emittedToOffset > offset) {
++            _emittedToOffset = offset;
++            _pending.tailSet(offset).clear();
++        }
++    }
++
++    public void commit() {
++        long lastCompletedOffset = lastCompletedOffset();
++        if (lastCompletedOffset != lastCommittedOffset()) {
++            LOG.info("Writing last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
++            Map<Object, Object> data = ImmutableMap.builder()
++                    .put("topology", ImmutableMap.of("id", _topologyInstanceId,
++                            "name", _stormConf.get(Config.TOPOLOGY_NAME)))
++                    .put("offset", lastCompletedOffset)
++                    .put("partition", _partition.partition)
++                    .put("broker", ImmutableMap.of("host", _partition.host.host,
++                            "port", _partition.host.port))
++                    .put("topic", _spoutConfig.topic).build();
++            _state.writeJSON(committedPath(), data);
++            _committedTo = lastCompletedOffset;
++            LOG.info("Wrote last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
++        } else {
++            LOG.info("No new offset for " + _partition + " for topology: " + _topologyInstanceId);
++        }
++    }
++
++    private String committedPath() {
++        return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" + _partition.getId();
++    }
++
++    public long queryPartitionOffsetLatestTime() {
++        return KafkaUtils.getOffset(_consumer, _spoutConfig.topic, _partition.partition,
++                OffsetRequest.LatestTime());
++    }
++
++    public long lastCommittedOffset() {
++        return _committedTo;
++    }
++
++    public long lastCompletedOffset() {
++        if (_pending.isEmpty()) {
++            return _emittedToOffset;
++        } else {
++            return _pending.first();
++        }
++    }
++
++    public Partition getPartition() {
++        return _partition;
++    }
++
++    public void close() {
++        _connections.unregister(_partition.host, _partition.partition);
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
index 0000000,0000000..05551ec
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
@@@ -1,0 -1,0 +1,19 @@@
++package storm.kafka;
++
++import java.io.Serializable;
++import java.util.List;
++
++
++public class SpoutConfig extends KafkaConfig implements Serializable {
++    public List<String> zkServers = null;
++    public Integer zkPort = null;
++    public String zkRoot = null;
++    public String id = null;
++    public long stateUpdateIntervalMs = 2000;
++
++    public SpoutConfig(BrokerHosts hosts, String topic, String zkRoot, String id) {
++        super(hosts, topic);
++        this.zkRoot = zkRoot;
++        this.id = id;
++    }
++}
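
The zkRoot and id fields determine where PartitionManager.commit() stores
offsets (see committedPath() above), one ZooKeeper node per partition. A
sketch with illustrative values:

    // With zkRoot = "/kafkastorm" and id = "event-reader", offsets land at:
    //   /kafkastorm/event-reader/partition_0
    //   /kafkastorm/event-reader/partition_1
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "events", "/kafkastorm", "event-reader");
    spoutConfig.stateUpdateIntervalMs = 5000; // commit offsets to ZK at most every 5 seconds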

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
index 0000000,0000000..040060c
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
@@@ -1,0 -1,0 +1,31 @@@
++package storm.kafka;
++
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++
++
++public class StaticCoordinator implements PartitionCoordinator {
++    Map<Partition, PartitionManager> _managers = new HashMap<Partition, PartitionManager>();
++    List<PartitionManager> _allManagers = new ArrayList<PartitionManager>();
++
++    public StaticCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig config, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
++        StaticHosts hosts = (StaticHosts) config.hosts;
++        List<Partition> myPartitions = KafkaUtils.calculatePartitionsForTask(hosts.getPartitionInformation(), totalTasks, taskIndex);
++        for (Partition myPartition : myPartitions) {
++            _managers.put(myPartition, new PartitionManager(connections, topologyInstanceId, state, stormConf, config, myPartition));
++        }
++        _allManagers = new ArrayList<PartitionManager>(_managers.values());
++    }
++
++    @Override
++    public List<PartitionManager> getMyManagedPartitions() {
++        return _allManagers;
++    }
++
++    public PartitionManager getManager(Partition partition) {
++        return _managers.get(partition);
++    }
++
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
index 0000000,0000000..9ed7193
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
@@@ -1,0 -1,0 +1,21 @@@
++package storm.kafka;
++
++import storm.kafka.trident.GlobalPartitionInformation;
++
++/**
++ * Date: 11/05/2013
++ * Time: 14:43
++ */
++public class StaticHosts implements BrokerHosts {
++
++
++    private GlobalPartitionInformation partitionInformation;
++
++    public StaticHosts(GlobalPartitionInformation partitionInformation) {
++        this.partitionInformation = partitionInformation;
++    }
++
++    public GlobalPartitionInformation getPartitionInformation() {
++        return partitionInformation;
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
index 0000000,0000000..a9b9db1
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
@@@ -1,0 -1,0 +1,35 @@@
++package storm.kafka;
++
++import kafka.javaapi.consumer.SimpleConsumer;
++
++import java.util.HashMap;
++import java.util.Map;
++
++public class StaticPartitionConnections {
++    Map<Integer, SimpleConsumer> _kafka = new HashMap<Integer, SimpleConsumer>();
++    KafkaConfig _config;
++    StaticHosts hosts;
++
++    public StaticPartitionConnections(KafkaConfig conf) {
++        _config = conf;
++        if (!(conf.hosts instanceof StaticHosts)) {
++            throw new RuntimeException("Must configure with static hosts");
++        }
++        this.hosts = (StaticHosts) conf.hosts;
++    }
++
++    public SimpleConsumer getConsumer(int partition) {
++        if (!_kafka.containsKey(partition)) {
++            Broker hp = hosts.getPartitionInformation().getBrokerFor(partition);
++            _kafka.put(partition, new SimpleConsumer(hp.host, hp.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId));
++
++        }
++        return _kafka.get(partition);
++    }
++
++    public void close() {
++        for (SimpleConsumer consumer : _kafka.values()) {
++            consumer.close();
++        }
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
index 0000000,0000000..a6adddb
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
@@@ -1,0 -1,0 +1,20 @@@
++package storm.kafka;
++
++import backtype.storm.tuple.Values;
++import com.google.common.collect.ImmutableMap;
++
++import java.util.List;
++
++public class StringKeyValueScheme extends StringScheme implements KeyValueScheme {
++
++    @Override
++    public List<Object> deserializeKeyAndValue(byte[] key, byte[] value) {
++        if (key == null) {
++            return deserialize(value);
++        }
++        String keyString = StringScheme.deserializeString(key);
++        String valueString = StringScheme.deserializeString(value);
++        return new Values(ImmutableMap.of(keyString, valueString));
++    }
++
++}
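
A sketch of the tuples this scheme produces (byte arrays written as string
literals for brevity):

    StringKeyValueScheme scheme = new StringKeyValueScheme();
    // Keyed message -> one-field tuple holding a map: [{k1=v1}]
    List<Object> keyed = scheme.deserializeKeyAndValue("k1".getBytes(), "v1".getBytes());
    // Unkeyed message falls back to plain StringScheme behaviour: ["v1"]
    List<Object> plain = scheme.deserializeKeyAndValue(null, "v1".getBytes());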

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
index 0000000,0000000..a809448
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
@@@ -1,0 -1,0 +1,29 @@@
++package storm.kafka;
++
++import backtype.storm.spout.Scheme;
++import backtype.storm.tuple.Fields;
++import backtype.storm.tuple.Values;
++
++import java.io.UnsupportedEncodingException;
++import java.util.List;
++
++public class StringScheme implements Scheme {
++
++    public static final String STRING_SCHEME_KEY = "str";
++
++    public List<Object> deserialize(byte[] bytes) {
++        return new Values(deserializeString(bytes));
++    }
++
++    public static String deserializeString(byte[] string) {
++        try {
++            return new String(string, "UTF-8");
++        } catch (UnsupportedEncodingException e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++    public Fields getOutputFields() {
++        return new Fields(STRING_SCHEME_KEY);
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
index 0000000,0000000..ec35aed
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
@@@ -1,0 -1,0 +1,95 @@@
++package storm.kafka;
++
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++import storm.kafka.trident.GlobalPartitionInformation;
++
++import java.util.*;
++
++import static storm.kafka.KafkaUtils.taskId;
++
++public class ZkCoordinator implements PartitionCoordinator {
++    public static final Logger LOG = LoggerFactory.getLogger(ZkCoordinator.class);
++
++    SpoutConfig _spoutConfig;
++    int _taskIndex;
++    int _totalTasks;
++    String _topologyInstanceId;
++    Map<Partition, PartitionManager> _managers = new HashMap<Partition, PartitionManager>();
++    List<PartitionManager> _cachedList;
++    Long _lastRefreshTime = null;
++    int _refreshFreqMs;
++    DynamicPartitionConnections _connections;
++    DynamicBrokersReader _reader;
++    ZkState _state;
++    Map _stormConf;
++
++    public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
++        this(connections, stormConf, spoutConfig, state, taskIndex, totalTasks, topologyInstanceId, buildReader(stormConf, spoutConfig));
++    }
++
++    public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId, DynamicBrokersReader reader) {
++        _spoutConfig = spoutConfig;
++        _connections = connections;
++        _taskIndex = taskIndex;
++        _totalTasks = totalTasks;
++        _topologyInstanceId = topologyInstanceId;
++        _stormConf = stormConf;
++        _state = state;
++        ZkHosts brokerConf = (ZkHosts) spoutConfig.hosts;
++        _refreshFreqMs = brokerConf.refreshFreqSecs * 1000;
++        _reader = reader;
++    }
++
++    private static DynamicBrokersReader buildReader(Map stormConf, SpoutConfig spoutConfig) {
++        ZkHosts hosts = (ZkHosts) spoutConfig.hosts;
++        return new DynamicBrokersReader(stormConf, hosts.brokerZkStr, hosts.brokerZkPath, spoutConfig.topic);
++    }
++
++    @Override
++    public List<PartitionManager> getMyManagedPartitions() {
++        if (_lastRefreshTime == null || (System.currentTimeMillis() - _lastRefreshTime) > _refreshFreqMs) {
++            refresh();
++            _lastRefreshTime = System.currentTimeMillis();
++        }
++        return _cachedList;
++    }
++
++    void refresh() {
++        try {
++            LOG.info(taskId(_taskIndex, _totalTasks) + "Refreshing partition manager connections");
++            GlobalPartitionInformation brokerInfo = _reader.getBrokerInfo();
++            List<Partition> mine = KafkaUtils.calculatePartitionsForTask(brokerInfo, _totalTasks, _taskIndex);
++
++            Set<Partition> curr = _managers.keySet();
++            Set<Partition> newPartitions = new HashSet<Partition>(mine);
++            newPartitions.removeAll(curr);
++
++            Set<Partition> deletedPartitions = new HashSet<Partition>(curr);
++            deletedPartitions.removeAll(mine);
++
++            LOG.info(taskId(_taskIndex, _totalTasks) + "Deleted partition managers: " + deletedPartitions.toString());
++
++            for (Partition id : deletedPartitions) {
++                PartitionManager man = _managers.remove(id);
++                man.close();
++            }
++            LOG.info(taskId(_taskIndex, _totalTasks) + "New partition managers: " + newPartitions.toString());
++
++            for (Partition id : newPartitions) {
++                PartitionManager man = new PartitionManager(_connections, _topologyInstanceId, _state, _stormConf, _spoutConfig, id);
++                _managers.put(id, man);
++            }
++
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++        _cachedList = new ArrayList<PartitionManager>(_managers.values());
++        LOG.info(taskId(_taskIndex, _totalTasks) + "Finished refreshing");
++    }
++
++    @Override
++    public PartitionManager getManager(Partition partition) {
++        return _managers.get(partition);
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
index 0000000,0000000..f2e0fc2
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
@@@ -1,0 -1,0 +1,22 @@@
++package storm.kafka;
++
++/**
++ * Date: 11/05/2013
++ * Time: 14:38
++ */
++public class ZkHosts implements BrokerHosts {
++    private static final String DEFAULT_ZK_PATH = "/brokers";
++
++    public String brokerZkStr = null;
++    public String brokerZkPath = null; // e.g., /kafka/brokers
++    public int refreshFreqSecs = 60;
++
++    public ZkHosts(String brokerZkStr, String brokerZkPath) {
++        this.brokerZkStr = brokerZkStr;
++        this.brokerZkPath = brokerZkPath;
++    }
++
++    public ZkHosts(String brokerZkStr) {
++        this(brokerZkStr, DEFAULT_ZK_PATH);
++    }
++}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/ZkState.java
index 0000000,0000000..d5416af
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
@@@ -1,0 -1,0 +1,99 @@@
++package storm.kafka;
++
++import backtype.storm.Config;
++import backtype.storm.utils.Utils;
++import com.netflix.curator.framework.CuratorFramework;
++import com.netflix.curator.framework.CuratorFrameworkFactory;
++import com.netflix.curator.retry.RetryNTimes;
++import org.apache.zookeeper.CreateMode;
++import org.json.simple.JSONValue;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++import java.nio.charset.Charset;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++
++public class ZkState {
++    public static final Logger LOG = LoggerFactory.getLogger(ZkState.class);
++    CuratorFramework _curator;
++
++    private CuratorFramework newCurator(Map stateConf) throws Exception {
++        Integer port = (Integer) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
++        String serverPorts = "";
++        for (String server : (List<String>) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
++            serverPorts = serverPorts + server + ":" + port + ",";
++        }
++        return CuratorFrameworkFactory.newClient(serverPorts,
++                Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
++                15000,
++                new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
++                        Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
++    }
++
++    public CuratorFramework getCurator() {
++        assert _curator != null;
++        return _curator;
++    }
++
++    public ZkState(Map stateConf) {
++        stateConf = new HashMap(stateConf);
++
++        try {
++            _curator = newCurator(stateConf);
++            _curator.start();
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++    public void writeJSON(String path, Map<Object, Object> data) {
++        LOG.info("Writing " + path + " the data " + data.toString());
++        writeBytes(path, JSONValue.toJSONString(data).getBytes(Charset.forName("UTF-8")));
++    }
++
++    public void writeBytes(String path, byte[] bytes) {
++        try {
++            if (_curator.checkExists().forPath(path) == null) {
++                _curator.create()
++                        .creatingParentsIfNeeded()
++                        .withMode(CreateMode.PERSISTENT)
++                        .forPath(path, bytes);
++            } else {
++                _curator.setData().forPath(path, bytes);
++            }
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++    public Map<Object, Object> readJSON(String path) {
++        try {
++            byte[] b = readBytes(path);
++            if (b == null) {
++                return null;
++            }
++            return (Map<Object, Object>) JSONValue.parse(new String(b, "UTF-8"));
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++    public byte[] readBytes(String path) {
++        try {
++            if (_curator.checkExists().forPath(path) != null) {
++                return _curator.getData().forPath(path);
++            } else {
++                return null;
++            }
++        } catch (Exception e) {
++            throw new RuntimeException(e);
++        }
++    }
++
++    public void close() {
++        _curator.close();
++        _curator = null;
++    }
++}
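
A usage sketch mirroring how PartitionManager persists offsets; the path
and payload below are illustrative:

    ZkState state = new ZkState(stateConf); // stateConf as assembled in KafkaSpout.open()
    state.writeJSON("/kafkastorm/event-reader/partition_0",
            ImmutableMap.<Object, Object>of("offset", 42L, "topic", "events"));
    Map<Object, Object> saved = state.readJSON("/kafkastorm/event-reader/partition_0");
    state.close();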

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
index 0000000,0000000..89969d9
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
@@@ -1,0 -1,0 +1,72 @@@
++package storm.kafka.bolt;
++
++import backtype.storm.task.OutputCollector;
++import backtype.storm.task.TopologyContext;
++import backtype.storm.topology.OutputFieldsDeclarer;
++import backtype.storm.topology.base.BaseRichBolt;
++import backtype.storm.tuple.Tuple;
++import kafka.javaapi.producer.Producer;
++import kafka.producer.KeyedMessage;
++import kafka.producer.ProducerConfig;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++import java.util.Map;
++import java.util.Properties;
++
++
++/**
++ * Bolt implementation that can send Tuple data to Kafka
++ * <p/>
++ * It expects the producer configuration and topic in storm config under
++ * <p/>
++ * 'kafka.broker.properties' and 'topic'
++ * <p/>
++ * respectively.
++ */
++public class KafkaBolt<K, V> extends BaseRichBolt {
++
++    private static final Logger LOG = LoggerFactory.getLogger(KafkaBolt.class);
++
++    public static final String TOPIC = "topic";
++    public static final String KAFKA_BROKER_PROPERTIES = "kafka.broker.properties";
++
++    public static final String BOLT_KEY = "key";
++    public static final String BOLT_MESSAGE = "message";
++
++    private Producer<K, V> producer;
++    private OutputCollector collector;
++    private String topic;
++
++    @Override
++    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
++        Map configMap = (Map) stormConf.get(KAFKA_BROKER_PROPERTIES);
++        Properties properties = new Properties();
++        properties.putAll(configMap);
++        ProducerConfig config = new ProducerConfig(properties);
++        producer = new Producer<K, V>(config);
++        this.topic = (String) stormConf.get(TOPIC);
++        this.collector = collector;
++    }
++
++    @Override
++    public void execute(Tuple input) {
++        K key = null;
++        if (input.contains(BOLT_KEY)) {
++            key = (K) input.getValueByField(BOLT_KEY);
++        }
++        V message = (V) input.getValueByField(BOLT_MESSAGE);
++        try {
++            producer.send(new KeyedMessage<K, V>(topic, key, message));
++        } catch (Exception ex) {
++            LOG.error("Could not send message with key '" + key + "' and value '" + message + "'", ex);
++        } finally {
++            collector.ack(input);
++        }
++    }
++
++    @Override
++    public void declareOutputFields(OutputFieldsDeclarer declarer) {
++
++    }
++}
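
A wiring sketch, assuming upstream tuples carry "key" and "message" fields;
the broker address and component names are placeholders, and the producer
properties are standard Kafka 0.8 producer settings:

    Properties props = new Properties();
    props.put("metadata.broker.list", "kafka1.example.com:9092");
    props.put("serializer.class", "kafka.serializer.StringEncoder");

    Config conf = new Config();
    conf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
    conf.put(KafkaBolt.TOPIC, "events");

    // `builder` is the TopologyBuilder from the earlier spout sketch.
    builder.setBolt("kafka-out", new KafkaBolt<String, String>())
           .shuffleGrouping("event-source");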

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/7d1bf2a9/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
----------------------------------------------------------------------
diff --cc external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
index 0000000,0000000..f67acaa
new file mode 100644
--- /dev/null
+++ b/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
@@@ -1,0 -1,0 +1,37 @@@
++package storm.kafka.trident;
++
++import storm.kafka.KafkaUtils;
++import storm.trident.spout.IOpaquePartitionedTridentSpout;
++import storm.trident.spout.IPartitionedTridentSpout;
++
++import java.util.Map;
++
++/**
++ * Date: 11/05/2013
++ * Time: 19:35
++ */
++class Coordinator implements IPartitionedTridentSpout.Coordinator<GlobalPartitionInformation>, IOpaquePartitionedTridentSpout.Coordinator<GlobalPartitionInformation> {
++
++    private IBrokerReader reader;
++    private TridentKafkaConfig config;
++
++    public Coordinator(Map conf, TridentKafkaConfig tridentKafkaConfig) {
++        config = tridentKafkaConfig;
++        reader = KafkaUtils.makeBrokerReader(conf, config);
++    }
++
++    @Override
++    public void close() {
++        config.coordinator.close();
++    }
++
++    @Override
++    public boolean isReady(long txid) {
++        return config.coordinator.isReady(txid);
++    }
++
++    @Override
++    public GlobalPartitionInformation getPartitionsForBatch() {
++        return reader.getCurrentBrokers();
++    }
++}


[26/50] [abbrv] git commit: Add a new per-partition and total metric, "earliestTimeOffset". Rename "latestTime" metric to "latestTimeOffset" to more closely conform to other, related metrics' nomenclature.

Posted by pt...@apache.org.
Add a new per-partition and total metric, "earliestTimeOffset". Rename
"latestTime" metric to "latestTimeOffset" to more closely conform to
other, related metrics' nomenclature.

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/c695c1b0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/c695c1b0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/c695c1b0

Branch: refs/heads/master
Commit: c695c1b06cd1f44fbbe1181a41a6c4ffe2186e01
Parents: bd0cc45
Author: Danijel Schiavuzzi <da...@infobip.com>
Authored: Thu Feb 20 18:09:25 2014 +0100
Committer: Danijel Schiavuzzi <da...@infobip.com>
Committed: Mon Feb 24 17:05:45 2014 +0100

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaUtils.java | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/c695c1b0/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaUtils.java b/src/jvm/storm/kafka/KafkaUtils.java
index 300d998..eed438f 100644
--- a/src/jvm/storm/kafka/KafkaUtils.java
+++ b/src/jvm/storm/kafka/KafkaUtils.java
@@ -79,6 +79,7 @@ public class KafkaUtils {
         public Object getValueAndReset() {
             try {
                 long totalSpoutLag = 0;
+                long totalEarliestTimeOffset = 0;
                 long totalLatestTimeOffset = 0;
                 long totalLatestEmittedOffset = 0;
                 HashMap ret = new HashMap();
@@ -90,22 +91,26 @@ public class KafkaUtils {
                             LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
                             return null;
                         }
+                        long earliestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.EarliestTime()); 
                         long latestTimeOffset = getOffset(consumer, _topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
-                        if (latestTimeOffset == 0) {
+                        if (earliestTimeOffset == 0 || latestTimeOffset == 0) {
                             LOG.warn("No data found in Kafka Partition " + partition.getId());
                             return null;
                         }
                         long latestEmittedOffset = e.getValue();
                         long spoutLag = latestTimeOffset - latestEmittedOffset;
                         ret.put(partition.getId() + "/" + "spoutLag", spoutLag);
-                        ret.put(partition.getId() + "/" + "latestTime", latestTimeOffset);
+                        ret.put(partition.getId() + "/" + "earliestTimeOffset", earliestTimeOffset);
+                        ret.put(partition.getId() + "/" + "latestTimeOffset", latestTimeOffset);
                         ret.put(partition.getId() + "/" + "latestEmittedOffset", latestEmittedOffset);
                         totalSpoutLag += spoutLag;
+                        totalEarliestTimeOffset += earliestTimeOffset;
                         totalLatestTimeOffset += latestTimeOffset;
                         totalLatestEmittedOffset += latestEmittedOffset;
                     }
                     ret.put("totalSpoutLag", totalSpoutLag);
-                    ret.put("totalLatestTime", totalLatestTimeOffset);
+                    ret.put("totalEarliestTimeOffset", totalEarliestTimeOffset);
+                    ret.put("totalLatestTimeOffset", totalLatestTimeOffset);
                     ret.put("totalLatestEmittedOffset", totalLatestEmittedOffset);
                     return ret;
                 } else {
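
For consumers of this metric, the map returned by getValueAndReset() now
carries both ends of the broker-retained offset range per partition. A hedged
sketch of reading it (the helper below and its map typing are illustrative
assumptions; the key layout "<partitionId>/earliestTimeOffset" etc. is exactly
as built above):

    // How many offsets the broker still retains for one partition.
    static long retainedOffsets(java.util.Map<String, Long> metrics, String partitionId) {
        long earliest = metrics.get(partitionId + "/earliestTimeOffset");
        long latest = metrics.get(partitionId + "/latestTimeOffset");
        return latest - earliest;
    }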


[18/50] [abbrv] git commit: preparing for tag

Posted by pt...@apache.org.
preparing for tag


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/37aa116d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/37aa116d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/37aa116d

Branch: refs/heads/master
Commit: 37aa116d7aba38c7600b669589bc9458e328bb4a
Parents: e9357ec
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Feb 8 08:32:25 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Feb 8 08:32:25 2014 +0000

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/37aa116d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8defde4..6de3e7f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
     <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.3.0-SNAPSHOT</version>
+    <version>0.3.0</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
     <licenses>


[11/50] [abbrv] git commit: added OFFSET_METADATA_TOO_LARGE error

Posted by pt...@apache.org.
added OFFSET_METADATA_TOO_LARGE error


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/80005ba3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/80005ba3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/80005ba3

Branch: refs/heads/master
Commit: 80005ba3916e2a05c82649dd67f11b0f813c41b6
Parents: 95c60db
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Jan 12 18:21:56 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Jan 12 18:24:37 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaError.java      |  5 ++--
 src/test/storm/kafka/KafkaErrorTest.java | 39 +++++++++++++++++++++++++++
 2 files changed, 42 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/80005ba3/src/jvm/storm/kafka/KafkaError.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaError.java b/src/jvm/storm/kafka/KafkaError.java
index 260ab91..a67335c 100644
--- a/src/jvm/storm/kafka/KafkaError.java
+++ b/src/jvm/storm/kafka/KafkaError.java
@@ -17,10 +17,11 @@ public enum KafkaError {
     REPLICA_NOT_AVAILABLE,
     MESSAGE_SIZE_TOO_LARGE,
     STALE_CONTROLLER_EPOCH,
+    OFFSET_METADATA_TOO_LARGE,
     UNKNOWN;
 
-    public static KafkaError getError(short errorCode) {
-        if (errorCode < 0) {
+    public static KafkaError getError(int errorCode) {
+        if (errorCode < 0 || errorCode >= UNKNOWN.ordinal()) {
             return UNKNOWN;
         } else {
             return values()[errorCode];

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/80005ba3/src/test/storm/kafka/KafkaErrorTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaErrorTest.java b/src/test/storm/kafka/KafkaErrorTest.java
new file mode 100644
index 0000000..e5f9db2
--- /dev/null
+++ b/src/test/storm/kafka/KafkaErrorTest.java
@@ -0,0 +1,39 @@
+package storm.kafka;
+
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Date: 12/01/2014
+ * Time: 18:09
+ */
+public class KafkaErrorTest {
+
+    @Test
+    public void getError() {
+        assertThat(KafkaError.getError(0), is(equalTo(KafkaError.NO_ERROR)));
+    }
+
+    @Test
+    public void offsetMetaDataTooLarge() {
+        assertThat(KafkaError.getError(12), is(equalTo(KafkaError.OFFSET_METADATA_TOO_LARGE)));
+    }
+
+    @Test
+    public void unknownNegative() {
+        assertThat(KafkaError.getError(-1), is(equalTo(KafkaError.UNKNOWN)));
+    }
+
+    @Test
+    public void unknownPositive() {
+        assertThat(KafkaError.getError(75), is(equalTo(KafkaError.UNKNOWN)));
+    }
+
+    @Test
+    public void unknown() {
+        assertThat(KafkaError.getError(13), is(equalTo(KafkaError.UNKNOWN)));
+    }
+}
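
Usage stays a simple bounds-checked table lookup; per the ordinal-based
mapping and the tests above, code 12 now resolves instead of falling through.
A minimal sketch (the fetch-response origin of the code is an assumption):

    short errorCode = 12; // e.g. taken from a Kafka fetch response
    KafkaError error = KafkaError.getError(errorCode);
    // error == KafkaError.OFFSET_METADATA_TOO_LARGE; anything negative or
    // >= UNKNOWN.ordinal() collapses to KafkaError.UNKNOWN rather than
    // throwing ArrayIndexOutOfBoundsException.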


[36/50] [abbrv] git commit: Update README.md

Posted by pt...@apache.org.
Update README.md

added yourkit acknowledgement

Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/93ca6545
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/93ca6545
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/93ca6545

Branch: refs/heads/master
Commit: 93ca65457201948d6be1f994f5a046ea0fe477a1
Parents: 9129536
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Thu Apr 3 21:18:08 2014 +0100
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Thu Apr 3 21:18:08 2014 +0100

----------------------------------------------------------------------
 README.md | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/93ca6545/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index 13b5b2c..874db01 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@ storm-kafka-0.8-plus
 
 Port of storm-kafka to support kafka >= 0.8
 
-##Usage
+##Usage:
 For information on how to use this library in your project see:
 
 [https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus](https://clojars.org/net.wurstmeister.storm/storm-kafka-0.8-plus)
@@ -12,3 +12,11 @@ For information on how to use this library in your project see:
 ##Example Topologies:
 
 [https://github.com/wurstmeister/storm-kafka-0.8-plus-test](https://github.com/wurstmeister/storm-kafka-0.8-plus-test)
+
+##Acknowledgement:
+
+YourKit is kindly supporting this open source project with its full-featured Java Profiler.
+YourKit, LLC is the creator of innovative and intelligent tools for profiling
+Java and .NET applications. Take a look at YourKit's leading software products:
+<a href="http://www.yourkit.com/java/profiler/index.jsp">YourKit Java Profiler</a> and
+<a href="http://www.yourkit.com/.net/profiler/index.jsp">YourKit .NET Profiler</a>.


[25/50] [abbrv] git commit: fixed tests

Posted by pt...@apache.org.
fixed tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/bd0cc453
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/bd0cc453
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/bd0cc453

Branch: refs/heads/master
Commit: bd0cc453a5d7389eb2a92bacef46c13b436a0316
Parents: 4de85c8
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Feb 23 16:10:38 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Feb 23 16:10:38 2014 +0000

----------------------------------------------------------------------
 src/test/storm/kafka/KafkaUtilsTest.java | 31 ++++++++++++++++-----------
 1 file changed, 18 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/bd0cc453/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/KafkaUtilsTest.java b/src/test/storm/kafka/KafkaUtilsTest.java
index db270c2..0763042 100644
--- a/src/test/storm/kafka/KafkaUtilsTest.java
+++ b/src/test/storm/kafka/KafkaUtilsTest.java
@@ -28,13 +28,14 @@ public class KafkaUtilsTest {
     private KafkaTestBroker broker;
     private SimpleConsumer simpleConsumer;
     private KafkaConfig config;
+    private BrokerHosts brokerHosts;
 
     @Before
     public void setup() {
         broker = new KafkaTestBroker();
         GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation();
         globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
-        BrokerHosts brokerHosts = new StaticHosts(globalPartitionInformation);
+        brokerHosts = new StaticHosts(globalPartitionInformation);
         config = new KafkaConfig(brokerHosts, "testTopic");
         simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
     }
@@ -60,19 +61,31 @@ public class KafkaUtilsTest {
 
     @Test
     public void fetchMessage() throws Exception {
-        long lastOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
-        sendMessageAndAssertValueForOffset(lastOffset);
+        String value = "test";
+        createTopicAndSendMessage(value);
+        long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
+        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
+        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
+        assertThat(message, is(equalTo(value)));
     }
 
     @Test(expected = FailedFetchException.class)
     public void fetchMessagesWithInvalidOffsetAndDefaultHandlingDisabled() throws Exception {
         config.useStartOffsetTimeIfOffsetOutOfRange = false;
-        sendMessageAndAssertValueForOffset(-99);
+        KafkaUtils.fetchMessages(config, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), -99);
     }
 
     @Test
     public void fetchMessagesWithInvalidOffsetAndDefaultHandlingEnabled() throws Exception {
-        sendMessageAndAssertValueForOffset(-99);
+        config = new KafkaConfig(brokerHosts, "newTopic");
+        String value = "test";
+        createTopicAndSendMessage(value);
+        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), -99);
+        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
+        assertThat(message, is(equalTo(value)));
     }
 
     @Test
@@ -165,12 +178,4 @@ public class KafkaUtilsTest {
         Producer<String, String> producer = new Producer<String, String>(producerConfig);
         producer.send(new KeyedMessage<String, String>(config.topic, key, value));
     }
-
-    private void sendMessageAndAssertValueForOffset(long offset) {
-        String value = "test";
-        createTopicAndSendMessage(value);
-        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
-        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
-        assertThat(message, is(equalTo(value)));
-    }
 }
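
The two invalid-offset behaviours pinned down by these tests: with
useStartOffsetTimeIfOffsetOutOfRange left enabled, fetchMessages falls back to
the configured start offset time; with it disabled, the bad offset surfaces as
a FailedFetchException. A hedged sketch of the disabled path (config,
simpleConsumer and partition are assumed to be set up as in the tests above):

    config.useStartOffsetTimeIfOffsetOutOfRange = false;
    try {
        KafkaUtils.fetchMessages(config, simpleConsumer, partition, -99);
    } catch (FailedFetchException e) {
        // out-of-range offsets now fail fast instead of being remapped
    }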


[19/50] [abbrv] git commit: update to next snapshot version

Posted by pt...@apache.org.
update to next snapshot version


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/312408a5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/312408a5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/312408a5

Branch: refs/heads/master
Commit: 312408a5c2d22bc2a1578d940c92417e05f94412
Parents: 37aa116
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sat Feb 8 08:47:12 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sat Feb 8 08:47:12 2014 +0000

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/312408a5/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 6de3e7f..910041a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
     <groupId>net.wurstmeister.storm</groupId>
     <artifactId>storm-kafka-0.8-plus</artifactId>
     <packaging>jar</packaging>
-    <version>0.3.0</version>
+    <version>0.4.0-SNAPSHOT</version>
     <name>storm-kafka-0.8-plus</name>
     <description>Storm module for kafka &gt; 0.8</description>
     <licenses>


[23/50] [abbrv] git commit: use metricsTimeBucketSizeInSecs consistently

Posted by pt...@apache.org.
use metricsTimeBucketSizeInSecs consistently


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/f8afa99c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/f8afa99c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/f8afa99c

Branch: refs/heads/master
Commit: f8afa99c3036918291c694163e81fd4f22e0460b
Parents: 71119ce
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Wed Feb 19 22:55:45 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Wed Feb 19 22:55:45 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/KafkaSpout.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/f8afa99c/src/jvm/storm/kafka/KafkaSpout.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/KafkaSpout.java b/src/jvm/storm/kafka/KafkaSpout.java
index d097510..79e33fe 100644
--- a/src/jvm/storm/kafka/KafkaSpout.java
+++ b/src/jvm/storm/kafka/KafkaSpout.java
@@ -93,7 +93,7 @@ public class KafkaSpout extends BaseRichSpout {
                 }
                 return _kafkaOffsetMetric.getValueAndReset();
             }
-        }, 60);
+        }, _spoutConfig.metricsTimeBucketSizeInSecs);
 
         context.registerMetric("kafkaPartition", new IMetric() {
             @Override
@@ -105,7 +105,7 @@ public class KafkaSpout extends BaseRichSpout {
                 }
                 return concatMetricsDataMaps;
             }
-        }, 60);
+        }, _spoutConfig.metricsTimeBucketSizeInSecs);
     }
 
     @Override
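
Both metric registrations now honour the same configurable interval instead of
a hard-coded 60 seconds. A minimal sketch of tuning it (the SpoutConfig
constructor arguments here are illustrative assumptions):

    SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, "testTopic", "/kafkastorm", "spout-id");
    spoutConfig.metricsTimeBucketSizeInSecs = 30; // emit kafkaOffset/kafkaPartition metrics every 30s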


[30/50] [abbrv] git commit: Merge remote-tracking branch 'original/master' into merge

Posted by pt...@apache.org.
Merge remote-tracking branch 'original/master' into merge


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/9796b527
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/9796b527
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/9796b527

Branch: refs/heads/master
Commit: 9796b5270450cbb914ada62996dd631f92209f4e
Parents: bd0cc45 61be2a0
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Tue Mar 18 11:35:33 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Tue Mar 18 11:35:33 2014 -0400

----------------------------------------------------------------------

----------------------------------------------------------------------



[50/50] [abbrv] git commit: STORM-208: update changelog

Posted by pt...@apache.org.
STORM-208: update changelog


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/c4ea0cae
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/c4ea0cae
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/c4ea0cae

Branch: refs/heads/master
Commit: c4ea0caeef0a0923920c3065de997f04240318b7
Parents: 98265c7
Author: P. Taylor Goetz <pt...@gmail.com>
Authored: Mon Apr 21 15:38:38 2014 -0400
Committer: P. Taylor Goetz <pt...@gmail.com>
Committed: Mon Apr 21 15:38:38 2014 -0400

----------------------------------------------------------------------
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/c4ea0cae/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 81a19ab..8a75853 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
 ## 0.9.2-incubating (unreleased)
+ * STORM-208: Add storm-kafka as an external module
  * STORM-285: Fix storm-core shade plugin config
  * STORM-12: reduce thread usage of netty transport
  * STORM-281: fix and issue with config parsing that could lead to leaking file descriptors


[17/50] [abbrv] git commit: added profile for scala 2.10 build

Posted by pt...@apache.org.
added profile for scala 2.10 build


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/e9357eca
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/e9357eca
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/e9357eca

Branch: refs/heads/master
Commit: e9357eca4881704c398acbe60dda228ad4c7beec
Parents: aabcd58
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Feb 2 19:24:47 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Feb 2 19:25:13 2014 +0000

----------------------------------------------------------------------
 pom.xml | 29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/e9357eca/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8dfd316..8defde4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -22,7 +22,21 @@
         <developerConnection>scm:git:ssh://git@github.com/wurstmeister/storm-kafka-0.8-plus.git</developerConnection>
         <url>https://github.com/wurstmeister/storm-kafka-0.8-plus</url>
     </scm>
+    <properties>
+        <scalaVersion>2.9.2</scalaVersion>
+        <kafkaArtifact>kafka_2.9.2</kafkaArtifact>
+        <envClassifier></envClassifier>
+    </properties>
     <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <version>2.4</version>
+                <configuration>
+                    <classifier>${envClassifier}</classifier>
+                </configuration>
+            </plugin>
+        </plugins>
         <sourceDirectory>src/jvm</sourceDirectory>
         <testSourceDirectory>src/test</testSourceDirectory>
         <resources>
@@ -87,7 +101,7 @@
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
-            <version>2.9.2</version>
+            <version>${scalaVersion}</version>
         </dependency>
         <dependency>
             <groupId>junit</groupId>
@@ -140,7 +154,7 @@
         </dependency>
         <dependency>
             <groupId>org.apache.kafka</groupId>
-            <artifactId>kafka_2.9.2</artifactId>
+            <artifactId>${kafkaArtifact}</artifactId>
             <version>0.8.0</version>
             <exclusions>
                 <exclusion>
@@ -179,4 +193,15 @@
             <url>https://clojars.org/repo</url>
         </repository>
     </distributionManagement>
+
+    <profiles>
+        <profile>
+            <id>Scala-2.10</id>
+            <properties>
+                <scalaVersion>2.10.3</scalaVersion>
+                <kafkaArtifact>kafka_2.10</kafkaArtifact>
+                <envClassifier>scala_2.10</envClassifier>
+            </properties>
+        </profile>
+    </profiles>
 </project>
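
The profile swaps all three properties at once, so a Scala 2.10 build is a
single standard Maven profile activation away, e.g.:

    mvn clean install -P Scala-2.10

which builds against kafka_2.10/Scala 2.10.3 and attaches the scala_2.10
classifier to the produced jar; the default build keeps Scala 2.9.2 and an
empty classifier.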


[06/50] [abbrv] git commit: Renamed HostPort to Broker

Posted by pt...@apache.org.
Renamed HostPort to Broker


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/735b87f7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/735b87f7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/735b87f7

Branch: refs/heads/master
Commit: 735b87f78459ec686017c538ef50d95a7db9584b
Parents: da18bd8
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Wed Dec 25 16:28:16 2013 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Wed Dec 25 16:28:16 2013 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/Broker.java                 | 56 ++++++++++++++++++++
 src/jvm/storm/kafka/DynamicBrokersReader.java   |  8 +--
 .../kafka/DynamicPartitionConnections.java      | 10 ++--
 src/jvm/storm/kafka/HostPort.java               | 56 --------------------
 src/jvm/storm/kafka/Partition.java              |  4 +-
 .../storm/kafka/StaticPartitionConnections.java |  2 +-
 .../trident/GlobalPartitionInformation.java     | 17 +++---
 .../storm/kafka/DynamicBrokersReaderTest.java   | 28 +++++-----
 8 files changed, 91 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/jvm/storm/kafka/Broker.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/Broker.java b/src/jvm/storm/kafka/Broker.java
new file mode 100644
index 0000000..66e6112
--- /dev/null
+++ b/src/jvm/storm/kafka/Broker.java
@@ -0,0 +1,56 @@
+package storm.kafka;
+
+import java.io.Serializable;
+
+public class Broker implements Serializable, Comparable<Broker> {
+    public final String host;
+    public final int port;
+
+    public Broker(String host, int port) {
+        this.host = host;
+        this.port = port;
+    }
+
+    public Broker(String host) {
+        this(host, 9092);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        Broker other = (Broker) o;
+        return host.equals(other.host) && port == other.port;
+    }
+
+    @Override
+    public int hashCode() {
+        return host.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return host + ":" + port;
+    }
+
+    public static Broker fromString(String host) {
+        Broker hp;
+        String[] spec = host.split(":");
+        if (spec.length == 1) {
+            hp = new Broker(spec[0]);
+        } else if (spec.length == 2) {
+            hp = new Broker(spec[0], Integer.parseInt(spec[1]));
+        } else {
+            throw new IllegalArgumentException("Invalid host specification: " + host);
+        }
+        return hp;
+    }
+
+
+    @Override
+    public int compareTo(Broker o) {
+        if (this.host.equals(o.host)) {
+            return this.port - o.port;
+        } else {
+            return this.host.compareTo(o.host);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/jvm/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/DynamicBrokersReader.java b/src/jvm/storm/kafka/DynamicBrokersReader.java
index c802baf..5b1d750 100644
--- a/src/jvm/storm/kafka/DynamicBrokersReader.java
+++ b/src/jvm/storm/kafka/DynamicBrokersReader.java
@@ -51,8 +51,8 @@ public class DynamicBrokersReader {
                 int leader = getLeaderFor(partition);
                 String path = brokerInfoPath + "/" + leader;
                 try {
-                    byte[] hostPortData = _curator.getData().forPath(path);
-                    HostPort hp = getBrokerHost(hostPortData);
+                    byte[] brokerData = _curator.getData().forPath(path);
+                    Broker hp = getBrokerHost(brokerData);
                     globalPartitionInformation.addPartition(partition, hp);
                 } catch (org.apache.zookeeper.KeeperException.NoNodeException e) {
                     LOG.error("Node {} does not exist ", path);
@@ -114,12 +114,12 @@ public class DynamicBrokersReader {
      * @param contents
      * @return
      */
-    private HostPort getBrokerHost(byte[] contents) {
+    private Broker getBrokerHost(byte[] contents) {
         try {
             Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(contents, "UTF-8"));
             String host = (String) value.get("host");
             Integer port = ((Long) value.get("port")).intValue();
-            return new HostPort(host, port);
+            return new Broker(host, port);
         } catch (UnsupportedEncodingException e) {
             throw new RuntimeException(e);
         }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/jvm/storm/kafka/DynamicPartitionConnections.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/DynamicPartitionConnections.java b/src/jvm/storm/kafka/DynamicPartitionConnections.java
index 7a799a0..8d0115b 100644
--- a/src/jvm/storm/kafka/DynamicPartitionConnections.java
+++ b/src/jvm/storm/kafka/DynamicPartitionConnections.java
@@ -24,7 +24,7 @@ public class DynamicPartitionConnections {
         }
     }
 
-    Map<HostPort, ConnectionInfo> _connections = new HashMap();
+    Map<Broker, ConnectionInfo> _connections = new HashMap();
     KafkaConfig _config;
     IBrokerReader _reader;
 
@@ -34,11 +34,11 @@ public class DynamicPartitionConnections {
     }
 
     public SimpleConsumer register(Partition partition) {
-        HostPort hostPort = _reader.getCurrentBrokers().getHostFor(partition.partition);
-        return register(hostPort, partition.partition);
+        Broker broker = _reader.getCurrentBrokers().getBrokerFor(partition.partition);
+        return register(broker, partition.partition);
     }
 
-    public SimpleConsumer register(HostPort host, int partition) {
+    public SimpleConsumer register(Broker host, int partition) {
         if (!_connections.containsKey(host)) {
             _connections.put(host, new ConnectionInfo(new SimpleConsumer(host.host, host.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId)));
         }
@@ -55,7 +55,7 @@ public class DynamicPartitionConnections {
         return null;
     }
 
-    public void unregister(HostPort port, int partition) {
+    public void unregister(Broker port, int partition) {
         ConnectionInfo info = _connections.get(port);
         info.partitions.remove(partition);
         if (info.partitions.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/jvm/storm/kafka/HostPort.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/HostPort.java b/src/jvm/storm/kafka/HostPort.java
deleted file mode 100644
index 5369858..0000000
--- a/src/jvm/storm/kafka/HostPort.java
+++ /dev/null
@@ -1,56 +0,0 @@
-package storm.kafka;
-
-import java.io.Serializable;
-
-public class HostPort implements Serializable, Comparable<HostPort> {
-    public String host;
-    public int port;
-
-    public HostPort(String host, int port) {
-        this.host = host;
-        this.port = port;
-    }
-
-    public HostPort(String host) {
-        this(host, 9092);
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        HostPort other = (HostPort) o;
-        return host.equals(other.host) && port == other.port;
-    }
-
-    @Override
-    public int hashCode() {
-        return host.hashCode();
-    }
-
-    @Override
-    public String toString() {
-        return host + ":" + port;
-    }
-
-    public static HostPort fromString(String host) {
-        HostPort hp;
-        String[] spec = host.split(":");
-        if (spec.length == 1) {
-            hp = new HostPort(spec[0]);
-        } else if (spec.length == 2) {
-            hp = new HostPort(spec[0], Integer.parseInt(spec[1]));
-        } else {
-            throw new IllegalArgumentException("Invalid host specification: " + host);
-        }
-        return hp;
-    }
-
-
-    @Override
-    public int compareTo(HostPort o) {
-        if (this.host.equals(o.host)) {
-            return this.port - o.port;
-        } else {
-            return this.host.compareTo(o.host);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/jvm/storm/kafka/Partition.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/Partition.java b/src/jvm/storm/kafka/Partition.java
index 87ab7b8..bbb4fbb 100644
--- a/src/jvm/storm/kafka/Partition.java
+++ b/src/jvm/storm/kafka/Partition.java
@@ -6,10 +6,10 @@ import storm.trident.spout.ISpoutPartition;
 
 public class Partition implements ISpoutPartition {
 
-    public final HostPort host;
+    public final Broker host;
     public final int partition;
 
-    public Partition(HostPort host, int partition) {
+    public Partition(Broker host, int partition) {
         this.host = host;
         this.partition = partition;
     }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/jvm/storm/kafka/StaticPartitionConnections.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/StaticPartitionConnections.java b/src/jvm/storm/kafka/StaticPartitionConnections.java
index 4294362..a9b9db1 100644
--- a/src/jvm/storm/kafka/StaticPartitionConnections.java
+++ b/src/jvm/storm/kafka/StaticPartitionConnections.java
@@ -20,7 +20,7 @@ public class StaticPartitionConnections {
 
     public SimpleConsumer getConsumer(int partition) {
         if (!_kafka.containsKey(partition)) {
-            HostPort hp = hosts.getPartitionInformation().getHostFor(partition);
+            Broker hp = hosts.getPartitionInformation().getBrokerFor(partition);
             _kafka.put(partition, new SimpleConsumer(hp.host, hp.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId));
 
         }

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
index 6b0fdec..a790009 100644
--- a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
+++ b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
@@ -1,6 +1,7 @@
 package storm.kafka.trident;
 
-import storm.kafka.HostPort;
+import storm.kafka.Broker;
+import storm.kafka.Broker;
 import storm.kafka.Partition;
 
 import java.io.Serializable;
@@ -12,13 +13,13 @@ import java.util.*;
  */
 public class GlobalPartitionInformation implements Iterable<Partition>, Serializable {
 
-    private Map<Integer, HostPort> partitionMap;
+    private Map<Integer, Broker> partitionMap;
 
     public GlobalPartitionInformation() {
-        partitionMap = new TreeMap<Integer, HostPort>();
+        partitionMap = new TreeMap<Integer, Broker>();
     }
 
-    public void addPartition(int partitionId, HostPort broker) {
+    public void addPartition(int partitionId, Broker broker) {
         partitionMap.put(partitionId, broker);
     }
 
@@ -29,13 +30,13 @@ public class GlobalPartitionInformation implements Iterable<Partition>, Serializ
                 '}';
     }
 
-    public HostPort getHostFor(Integer partitionId) {
+    public Broker getBrokerFor(Integer partitionId) {
         return partitionMap.get(partitionId);
     }
 
     public List<Partition> getOrderedPartitions() {
         List<Partition> partitions = new LinkedList<Partition>();
-        for (Map.Entry<Integer, HostPort> partition : partitionMap.entrySet()) {
+        for (Map.Entry<Integer, Broker> partition : partitionMap.entrySet()) {
             partitions.add(new Partition(partition.getValue(), partition.getKey()));
         }
         return partitions;
@@ -43,7 +44,7 @@ public class GlobalPartitionInformation implements Iterable<Partition>, Serializ
 
     @Override
     public Iterator<Partition> iterator() {
-        final Iterator<Map.Entry<Integer, HostPort>> iterator = partitionMap.entrySet().iterator();
+        final Iterator<Map.Entry<Integer, Broker>> iterator = partitionMap.entrySet().iterator();
 
         return new Iterator<Partition>() {
             @Override
@@ -53,7 +54,7 @@ public class GlobalPartitionInformation implements Iterable<Partition>, Serializ
 
             @Override
             public Partition next() {
-                Map.Entry<Integer, HostPort> next = iterator.next();
+                Map.Entry<Integer, Broker> next = iterator.next();
                 return new Partition(next.getValue(), next.getKey());
             }
 

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/735b87f7/src/test/storm/kafka/DynamicBrokersReaderTest.java
----------------------------------------------------------------------
diff --git a/src/test/storm/kafka/DynamicBrokersReaderTest.java b/src/test/storm/kafka/DynamicBrokersReaderTest.java
index fd90c3c..47387e3 100644
--- a/src/test/storm/kafka/DynamicBrokersReaderTest.java
+++ b/src/test/storm/kafka/DynamicBrokersReaderTest.java
@@ -88,8 +88,8 @@ public class DynamicBrokersReaderTest {
         addPartition(partition, host, port);
         GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
         assertEquals(1, brokerInfo.getOrderedPartitions().size());
-        assertEquals(port, brokerInfo.getHostFor(partition).port);
-        assertEquals(host, brokerInfo.getHostFor(partition).host);
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
     }
 
 
@@ -106,11 +106,11 @@ public class DynamicBrokersReaderTest {
         GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
         assertEquals(2, brokerInfo.getOrderedPartitions().size());
 
-        assertEquals(port, brokerInfo.getHostFor(partition).port);
-        assertEquals(host, brokerInfo.getHostFor(partition).host);
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
 
-        assertEquals(secondPort, brokerInfo.getHostFor(secondPartition).port);
-        assertEquals(host, brokerInfo.getHostFor(secondPartition).host);
+        assertEquals(secondPort, brokerInfo.getBrokerFor(secondPartition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
     }
 
 
@@ -126,11 +126,11 @@ public class DynamicBrokersReaderTest {
         GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
         assertEquals(2, brokerInfo.getOrderedPartitions().size());
 
-        assertEquals(port, brokerInfo.getHostFor(partition).port);
-        assertEquals(host, brokerInfo.getHostFor(partition).host);
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
 
-        assertEquals(port, brokerInfo.getHostFor(secondPartition).port);
-        assertEquals(host, brokerInfo.getHostFor(secondPartition).host);
+        assertEquals(port, brokerInfo.getBrokerFor(secondPartition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
     }
 
     @Test
@@ -140,14 +140,14 @@ public class DynamicBrokersReaderTest {
         int partition = 0;
         addPartition(partition, host, port);
         GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
-        assertEquals(port, brokerInfo.getHostFor(partition).port);
-        assertEquals(host, brokerInfo.getHostFor(partition).host);
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
 
         String newHost = host + "switch";
         int newPort = port + 1;
         addPartition(partition, newHost, newPort);
         brokerInfo = dynamicBrokersReader.getBrokerInfo();
-        assertEquals(newPort, brokerInfo.getHostFor(partition).port);
-        assertEquals(newHost, brokerInfo.getHostFor(partition).host);
+        assertEquals(newPort, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(newHost, brokerInfo.getBrokerFor(partition).host);
     }
 }
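
The rename is mechanical (HostPort -> Broker, getHostFor -> getBrokerFor), so
the existing parsing behaviour carries over unchanged. Per the new Broker.java
above:

    Broker defaulted = Broker.fromString("kafka01");      // port defaults to 9092
    Broker explicit = Broker.fromString("kafka01:9093");  // host:port form
    // anything with more than one ':' throws IllegalArgumentException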


[05/50] [abbrv] git commit: removed empty statement

Posted by pt...@apache.org.
removed empty statement


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/da18bd85
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/da18bd85
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/da18bd85

Branch: refs/heads/master
Commit: da18bd858035c495edb41cc018fe706a3f7ef831
Parents: e8f54d6
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Dec 22 16:10:14 2013 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Dec 22 16:10:14 2013 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/trident/KafkaUtils.java | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/da18bd85/src/jvm/storm/kafka/trident/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/KafkaUtils.java b/src/jvm/storm/kafka/trident/KafkaUtils.java
index 18dd851..e4ba3b3 100644
--- a/src/jvm/storm/kafka/trident/KafkaUtils.java
+++ b/src/jvm/storm/kafka/trident/KafkaUtils.java
@@ -109,6 +109,4 @@ public class KafkaUtils {
             }
         }
     }
-
-    ;
 }


[16/50] [abbrv] git commit: use guava equals/hashCode

Posted by pt...@apache.org.
use guava equals/hashCode


Project: http://git-wip-us.apache.org/repos/asf/incubator-storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-storm/commit/aabcd584
Tree: http://git-wip-us.apache.org/repos/asf/incubator-storm/tree/aabcd584
Diff: http://git-wip-us.apache.org/repos/asf/incubator-storm/diff/aabcd584

Branch: refs/heads/master
Commit: aabcd5846010ff0bc5832520bf8f59e9879728cc
Parents: 48f6840
Author: wurstmeister <wu...@users.noreply.github.com>
Authored: Sun Jan 26 21:32:13 2014 +0000
Committer: wurstmeister <wu...@users.noreply.github.com>
Committed: Sun Jan 26 21:34:54 2014 +0000

----------------------------------------------------------------------
 src/jvm/storm/kafka/Broker.java                 | 17 +++++++++----
 .../trident/GlobalPartitionInformation.java     | 26 +++++++-------------
 2 files changed, 21 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/aabcd584/src/jvm/storm/kafka/Broker.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/Broker.java b/src/jvm/storm/kafka/Broker.java
index 66e6112..2451eee 100644
--- a/src/jvm/storm/kafka/Broker.java
+++ b/src/jvm/storm/kafka/Broker.java
@@ -1,6 +1,7 @@
 package storm.kafka;
 
 import java.io.Serializable;
+import com.google.common.base.Objects;
 
 public class Broker implements Serializable, Comparable<Broker> {
     public final String host;
@@ -16,14 +17,20 @@ public class Broker implements Serializable, Comparable<Broker> {
     }
 
     @Override
-    public boolean equals(Object o) {
-        Broker other = (Broker) o;
-        return host.equals(other.host) && port == other.port;
+    public int hashCode() {
+        return Objects.hashCode(host, port);
     }
 
     @Override
-    public int hashCode() {
-        return host.hashCode();
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        final Broker other = (Broker) obj;
+        return Objects.equal(this.host, other.host) && Objects.equal(this.port, other.port);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-storm/blob/aabcd584/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --git a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
index e73d6b0..6f82f62 100644
--- a/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
+++ b/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
@@ -1,12 +1,13 @@
 package storm.kafka.trident;
 
 import storm.kafka.Broker;
-import storm.kafka.Broker;
 import storm.kafka.Partition;
 
 import java.io.Serializable;
 import java.util.*;
 
+import com.google.common.base.Objects;
+
 /**
  * Date: 14/05/2013
  * Time: 19:18
@@ -67,27 +68,18 @@ public class GlobalPartitionInformation implements Iterable<Partition>, Serializ
 
     @Override
     public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result
-                + ((partitionMap == null) ? 0 : partitionMap.hashCode());
-        return result;
+        return Objects.hashCode(partitionMap);
     }
 
     @Override
     public boolean equals(Object obj) {
-        if (this == obj)
+        if (this == obj) {
             return true;
-        if (obj == null)
-            return false;
-        if (getClass() != obj.getClass())
-            return false;
-        GlobalPartitionInformation other = (GlobalPartitionInformation) obj;
-        if (partitionMap == null) {
-            if (other.partitionMap != null)
-                return false;
-        } else if (!partitionMap.equals(other.partitionMap))
+        }
+        if (obj == null || getClass() != obj.getClass()) {
             return false;
-        return true;
+        }
+        final GlobalPartitionInformation other = (GlobalPartitionInformation) obj;
+        return Objects.equal(this.partitionMap, other.partitionMap);
     }
 }
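
Beyond brevity, the Guava-based methods tighten semantics: hashCode() now
folds in the port (previously two brokers on the same host always collided),
and equals() guards null and foreign types. Illustratively, against the new
Broker:

    Broker broker = new Broker("kafka01", 9092);
    broker.equals(null);      // false, where the old cast-first version threw NullPointerException
    broker.equals("kafka01"); // false, where the old version threw ClassCastException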