Posted to commits@kafka.apache.org by jk...@apache.org on 2015/01/30 03:39:37 UTC

[7/7] kafka git commit: KAFKA-1760: New consumer.

KAFKA-1760: New consumer.


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/0699ff2c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/0699ff2c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/0699ff2c

Branch: refs/heads/trunk
Commit: 0699ff2ce60abb466cab5315977a224f1a70a4da
Parents: 11ec9bf
Author: Jay Kreps <ja...@gmail.com>
Authored: Sun Jan 11 11:29:48 2015 -0800
Committer: Jay Kreps <ja...@gmail.com>
Committed: Thu Jan 29 02:55:35 2015 -0800

----------------------------------------------------------------------
 build.gradle                                    |    1 +
 .../org/apache/kafka/clients/ClientRequest.java |   19 +-
 .../kafka/clients/ClusterConnectionStates.java  |   30 +-
 .../kafka/clients/CommonClientConfigs.java      |   58 +
 .../apache/kafka/clients/ConnectionState.java   |    2 +-
 .../org/apache/kafka/clients/KafkaClient.java   |   44 +-
 .../org/apache/kafka/clients/NetworkClient.java |  138 +-
 .../kafka/clients/NodeConnectionState.java      |   31 -
 .../kafka/clients/RequestCompletionHandler.java |   23 +
 .../kafka/clients/consumer/CommitType.java      |    5 +
 .../apache/kafka/clients/consumer/Consumer.java |  109 +-
 .../kafka/clients/consumer/ConsumerConfig.java  |  296 ++-
 .../consumer/ConsumerRebalanceCallback.java     |   89 +-
 .../kafka/clients/consumer/ConsumerRecord.java  |   89 +-
 .../kafka/clients/consumer/ConsumerRecords.java |  107 +-
 .../kafka/clients/consumer/KafkaConsumer.java   | 1865 +++++++++++++-----
 .../kafka/clients/consumer/MockConsumer.java    |  233 ++-
 .../consumer/NoOffsetForPartitionException.java |   29 +
 .../kafka/clients/consumer/OffsetMetadata.java  |   59 -
 .../clients/consumer/internals/Heartbeat.java   |   47 +
 .../NoOpConsumerRebalanceCallback.java          |   30 +
 .../consumer/internals/SubscriptionState.java   |  166 ++
 .../kafka/clients/producer/KafkaProducer.java   |    3 +-
 .../kafka/clients/producer/MockProducer.java    |    2 +-
 .../kafka/clients/producer/ProducerConfig.java  |   64 +-
 .../clients/producer/internals/Metadata.java    |   22 +-
 .../clients/producer/internals/Partitioner.java |   27 +-
 .../clients/producer/internals/Sender.java      |  155 +-
 .../java/org/apache/kafka/common/Cluster.java   |   14 +
 .../org/apache/kafka/common/PartitionInfo.java  |    4 +-
 .../apache/kafka/common/config/ConfigDef.java   |   26 +-
 .../kafka/common/errors/ApiException.java       |    2 +-
 .../apache/kafka/common/network/Selectable.java |   58 +-
 .../apache/kafka/common/network/Selector.java   |   99 +-
 .../apache/kafka/common/protocol/Errors.java    |   59 +-
 .../kafka/common/protocol/types/Struct.java     |   46 +-
 .../apache/kafka/common/record/LogEntry.java    |    4 +
 .../kafka/common/record/MemoryRecords.java      |   48 +-
 .../requests/ConsumerMetadataRequest.java       |    9 +-
 .../requests/ConsumerMetadataResponse.java      |   17 +-
 .../kafka/common/requests/FetchRequest.java     |   74 +-
 .../kafka/common/requests/FetchResponse.java    |   21 +-
 .../kafka/common/requests/HeartbeatRequest.java |   13 +-
 .../common/requests/HeartbeatResponse.java      |    9 +-
 .../kafka/common/requests/JoinGroupRequest.java |   17 +-
 .../common/requests/JoinGroupResponse.java      |   25 +-
 .../common/requests/ListOffsetRequest.java      |   25 +-
 .../common/requests/ListOffsetResponse.java     |   19 +-
 .../kafka/common/requests/MetadataRequest.java  |    9 +-
 .../kafka/common/requests/MetadataResponse.java |   33 +-
 .../common/requests/OffsetCommitRequest.java    |   45 +-
 .../common/requests/OffsetCommitResponse.java   |   17 +-
 .../common/requests/OffsetFetchRequest.java     |   17 +-
 .../common/requests/OffsetFetchResponse.java    |   46 +-
 .../kafka/common/requests/ProduceRequest.java   |   21 +-
 .../kafka/common/requests/ProduceResponse.java  |   19 +-
 .../common/serialization/Deserializer.java      |    2 +-
 .../org/apache/kafka/common/utils/Utils.java    |   42 +
 .../org/apache/kafka/clients/MockClient.java    |   31 +-
 .../apache/kafka/clients/NetworkClientTest.java |   35 +-
 .../clients/consumer/ConsumerExampleTest.java   |  297 ---
 .../clients/consumer/MockConsumerTest.java      |   32 +
 .../internals/SubscriptionStateTest.java        |   61 +
 .../kafka/clients/producer/BufferPoolTest.java  |    4 +-
 .../clients/producer/MockProducerTest.java      |    2 +-
 .../kafka/clients/producer/PartitionerTest.java |   49 +-
 .../kafka/clients/producer/SenderTest.java      |    6 +-
 .../kafka/common/config/ConfigDefTest.java      |    6 +-
 .../kafka/common/network/SelectorTest.java      |   53 +-
 .../apache/kafka/common/utils/UtilsTest.java    |   10 +
 .../org/apache/kafka/test/MockSelector.java     |   52 +-
 .../kafka/api/ConsumerMetadataRequest.scala     |    2 +-
 .../kafka/api/ConsumerMetadataResponse.scala    |    2 +-
 .../main/scala/kafka/cluster/Partition.scala    |    2 +-
 .../controller/ControllerChannelManager.scala   |    2 +-
 core/src/main/scala/kafka/log/LogConfig.scala   |    4 +-
 .../src/main/scala/kafka/server/KafkaApis.scala |   29 +
 .../scala/kafka/server/ReplicaManager.scala     |    2 +-
 .../scala/kafka/tools/ConsoleConsumer.scala     |    2 -
 .../scala/kafka/tools/ConsumerPerformance.scala |  169 +-
 .../kafka/tools/SimpleConsumerPerformance.scala |    2 +-
 .../main/scala/kafka/utils/KafkaScheduler.scala |   56 +-
 .../integration/kafka/api/ConsumerTest.scala    |  286 +++
 .../kafka/api/IntegrationTestHarness.scala      |   73 +
 .../api/RequestResponseSerializationTest.scala  |    4 +-
 .../integration/KafkaServerTestHarness.scala    |   39 +-
 .../kafka/integration/PrimitiveApiTest.scala    |   19 +-
 .../scala/unit/kafka/utils/MockScheduler.scala  |    2 +
 .../scala/unit/kafka/utils/SchedulerTest.scala  |   17 +
 .../test/scala/unit/kafka/utils/TestUtils.scala |   34 +-
 90 files changed, 4002 insertions(+), 1965 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index a980f61..6844372 100644
--- a/build.gradle
+++ b/build.gradle
@@ -369,6 +369,7 @@ project(':clients') {
   }
 
   javadoc {
+    include "**/org/apache/kafka/clients/consumer/*"
     include "**/org/apache/kafka/clients/producer/*"
     include "**/org/apache/kafka/common/*"
     include "**/org/apache/kafka/common/errors/*"

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java b/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java
index d32c319..ed4c0d9 100644
--- a/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java
+++ b/clients/src/main/java/org/apache/kafka/clients/ClientRequest.java
@@ -22,24 +22,25 @@ public final class ClientRequest {
     private final long createdMs;
     private final boolean expectResponse;
     private final RequestSend request;
-    private final Object attachment;
+    private final RequestCompletionHandler callback;
 
     /**
      * @param createdMs The unix timestamp in milliseconds for the time at which this request was created.
      * @param expectResponse Should we expect a response message or is this request complete once it is sent?
      * @param request The request
-     * @param attachment Associated data with the request
+     * @param callback A callback to execute when the response has been received (or null if no callback is necessary)
      */
-    public ClientRequest(long createdMs, boolean expectResponse, RequestSend request, Object attachment) {
+    public ClientRequest(long createdMs, boolean expectResponse, RequestSend request, RequestCompletionHandler callback) {
         this.createdMs = createdMs;
-        this.attachment = attachment;
+        this.callback = callback;
         this.request = request;
         this.expectResponse = expectResponse;
     }
 
     @Override
     public String toString() {
-        return "ClientRequest(expectResponse=" + expectResponse + ", payload=" + attachment + ", request=" + request + ")";
+        return "ClientRequest(expectResponse=" + expectResponse + ", callback=" + callback + ", request=" + request
+                + ")";
     }
 
     public boolean expectResponse() {
@@ -50,8 +51,12 @@ public final class ClientRequest {
         return request;
     }
 
-    public Object attachment() {
-        return attachment;
+    public boolean hasCallback() {
+        return callback != null;
+    }
+
+    public RequestCompletionHandler callback() {
+        return callback;
     }
 
     public long createdTime() {
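
As an aside, a minimal sketch (not part of this commit) of what call sites look like now that the
opaque Object attachment is a typed callback; here `send` stands in for an already-built
RequestSend, `handler` for a RequestCompletionHandler, and `response` for a received ClientResponse:

    ClientRequest withCallback = new ClientRequest(System.currentTimeMillis(), true, send, handler);
    ClientRequest fireAndForget = new ClientRequest(System.currentTimeMillis(), false, send, null);
    // a poll loop can now guard its dispatch instead of casting an attachment:
    if (withCallback.hasCallback())
        withCallback.callback().onComplete(response);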

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java b/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
index 8aece7e..574287d 100644
--- a/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
+++ b/clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
@@ -119,16 +119,42 @@ final class ClusterConnectionStates {
     public void disconnected(int node) {
         nodeState(node).state = ConnectionState.DISCONNECTED;
     }
-
+    
     /**
-     * Get the state of our connection to the given state
+     * Get the state of our connection to the given node
      * @param node The id of the node
      * @return The state of our connection
      */
+    public ConnectionState connectionState(int node) {
+        return nodeState(node).state;
+    }
+    
+    /**
+     * Get the state of a given node
+     * @param node The node to fetch the state for
+     */
     private NodeConnectionState nodeState(int node) {
         NodeConnectionState state = this.nodeState.get(node);
         if (state == null)
             throw new IllegalStateException("No entry found for node " + node);
         return state;
     }
+    
+    /**
+     * The state of our connection to a node
+     */
+    private static class NodeConnectionState {
+
+        ConnectionState state;
+        long lastConnectAttemptMs;
+
+        public NodeConnectionState(ConnectionState state, long lastConnectAttempt) {
+            this.state = state;
+            this.lastConnectAttemptMs = lastConnectAttempt;
+        }
+
+        public String toString() {
+            return "NodeState(" + state + ", " + lastConnectAttemptMs + ")";
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
new file mode 100644
index 0000000..06fcfe6
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.kafka.clients;
+
+/**
+ * Some configurations shared by both producer and consumer
+ */
+public class CommonClientConfigs {
+    
+    /*
+     * NOTE: DO NOT CHANGE THESE CONFIG NAMES, AS THEY ARE PART OF THE PUBLIC API AND CHANGES WILL BREAK USER CODE.
+     */
+
+    public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";
+    public static final String BOOTSTRAP_SERVERS_DOC = "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form "
+                                                       + "<code>host1:port1,host2:port2,...</code>. Since these servers are just used for the initial connection to "
+                                                       + "discover the full cluster membership (which may change dynamically), this list need not contain the full set of "
+                                                       + "servers (you may want more than one, though, in case a server is down).";
+    
+    public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms";
+    public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.";
+    
+    public static final String SEND_BUFFER_CONFIG = "send.buffer.bytes";
+    public static final String SEND_BUFFER_DOC = "The size of the TCP send buffer (SO_SNDBUF) to use when sending data.";
+
+    public static final String RECEIVE_BUFFER_CONFIG = "receive.buffer.bytes";
+    public static final String RECEIVE_BUFFER_DOC = "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.";
+
+    public static final String CLIENT_ID_CONFIG = "client.id";
+    public static final String CLIENT_ID_DOC = "An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.";
+
+    public static final String RECONNECT_BACKOFF_MS_CONFIG = "reconnect.backoff.ms";
+    public static final String RECONNECT_BACKOFF_MS_DOC = "The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the client to the broker.";
+
+    public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms";
+    public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly retrying and failing in a tight loop.";
+    
+    public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms";
+    public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The window of time a metrics sample is computed over.";
+
+    public static final String METRICS_NUM_SAMPLES_CONFIG = "metrics.num.samples";
+    public static final String METRICS_NUM_SAMPLES_DOC = "The number of samples maintained to compute metrics.";
+
+    public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters";
+    public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the <code>MetricsReporter</code> interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.";
+    
+}
\ No newline at end of file
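
For illustration, a sketch of client code referencing the shared keys when assembling a config
(the values are placeholders):

    import java.util.Properties;
    import org.apache.kafka.clients.CommonClientConfigs;

    Properties props = new Properties();
    props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "host1:9092,host2:9092");
    props.put(CommonClientConfigs.CLIENT_ID_CONFIG, "example-client");
    props.put(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG, "100");

ConsumerConfig, shown later in this diff, aliases these constants rather than redefining the strings.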

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java b/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java
index ab7e322..3867f8e 100644
--- a/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java
+++ b/clients/src/main/java/org/apache/kafka/clients/ConnectionState.java
@@ -15,6 +15,6 @@ package org.apache.kafka.clients;
 /**
  * The states of a node connection
  */
-enum ConnectionState {
+public enum ConnectionState {
     DISCONNECTED, CONNECTING, CONNECTED
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java b/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
index 3976955..8a3e55a 100644
--- a/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
+++ b/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
@@ -26,6 +26,7 @@ public interface KafkaClient {
     /**
      * Check if we are currently ready to send another request to the given node but don't attempt to connect if we
      * aren't.
+     * 
      * @param node The node to check
      * @param now The current timestamp
      */
@@ -34,6 +35,7 @@ public interface KafkaClient {
     /**
      * Initiate a connection to the given node (if necessary), and return true if already connected. The readiness of a
      * node will change only when poll is invoked.
+     * 
      * @param node The node to connect to.
      * @param now The current time
      * @return true iff we are ready to immediately initiate the sending of another request to the given node.
@@ -44,6 +46,7 @@ public interface KafkaClient {
      * Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When
      * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
      * connections.
+     * 
      * @param node The node to check
      * @param now The current timestamp
      * @return The number of milliseconds to wait.
@@ -51,19 +54,44 @@ public interface KafkaClient {
     public long connectionDelay(Node node, long now);
 
     /**
-     * Initiate the sending of the given requests and return any completed responses. Requests can only be sent on ready
-     * connections.
-     * @param requests The requests to send
+     * Queue up the given request for sending. Requests can only be sent on ready connections.
+     * 
+     * @param request The request
+     */
+    public void send(ClientRequest request);
+
+    /**
+     * Do actual reads and writes to sockets.
+     * 
      * @param timeout The maximum amount of time to wait for responses in ms
      * @param now The current time in ms
      * @throws IllegalStateException If a request is sent to an unready node
      */
-    public List<ClientResponse> poll(List<ClientRequest> requests, long timeout, long now);
+    public List<ClientResponse> poll(long timeout, long now);
+
+    /**
+     * Complete all in-flight requests for a given node
+     * 
+     * @param node The node to complete requests for
+     * @param now The current time in ms
+     * @return All requests that complete during this time period.
+     */
+    public List<ClientResponse> completeAll(int node, long now);
+
+    /**
+     * Complete all in-flight requests
+     * 
+     * @param now The current time in ms
+     * @return All requests that complete during this time period.
+     */
+    public List<ClientResponse> completeAll(long now);
 
     /**
      * Choose the node with the fewest outstanding requests. This method will prefer a node with an existing connection,
      * but will potentially choose a node for which we don't yet have a connection if all existing connections are in
      * use.
+     * 
      * @param now The current time in ms
      * @return The node with the fewest in-flight requests.
      */
@@ -75,7 +103,15 @@ public interface KafkaClient {
     public int inFlightRequestCount();
 
     /**
+     * Get the total number of in-flight requests for a particular node
+     * 
+     * @param nodeId The id of the node
+     */
+    public int inFlightRequestCount(int nodeId);
+
+    /**
      * Generate a request header for the next request
+     * 
      * @param key The API key of the request
      */
     public RequestHeader nextRequestHeader(ApiKeys key);
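
Putting the new methods together, a sketch of the calling pattern this interface now implies
(`client`, `node`, and `request` are assumed to exist):

    long now = System.currentTimeMillis();
    if (client.ready(node, now)) {
        // queue the request; no I/O happens yet
        client.send(request);
        // reads and writes happen here, waiting up to 100 ms for responses
        List<ClientResponse> responses = client.poll(100, now);
    }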

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
index 6746275..5950191 100644
--- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
+++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
@@ -102,6 +102,7 @@ public class NetworkClient implements KafkaClient {
 
     /**
      * Begin connecting to the given node, return true if we are already connected and ready to send to that node.
+     * 
      * @param node The node to check
      * @param now The current timestamp
      * @return True if we are ready to send to the given node
@@ -122,6 +123,7 @@ public class NetworkClient implements KafkaClient {
      * Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When
      * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
      * connections.
+     * 
      * @param node The node to check
      * @param now The current timestamp
      * @return The number of milliseconds to wait.
@@ -133,7 +135,8 @@ public class NetworkClient implements KafkaClient {
 
     /**
      * Check if the node with the given id is ready to send more requests.
-     * @param node The given node id
+     * 
+     * @param node The node
      * @param now The current time in ms
      * @return true if the node is ready
      */
@@ -141,7 +144,8 @@ public class NetworkClient implements KafkaClient {
     public boolean isReady(Node node, long now) {
         int nodeId = node.id();
         if (!this.metadataFetchInProgress && this.metadata.timeToNextUpdate(now) == 0)
-            // if we need to update our metadata now declare all requests unready to make metadata requests first priority
+            // if we need to update our metadata now declare all requests unready to make metadata requests first
+            // priority
             return false;
         else
             // otherwise we are ready if we are connected and can send more requests
@@ -150,6 +154,7 @@ public class NetworkClient implements KafkaClient {
 
     /**
      * Are we connected and ready and able to send more requests to the given node?
+     * 
      * @param node The node
      */
     private boolean isSendable(int node) {
@@ -157,49 +162,106 @@ public class NetworkClient implements KafkaClient {
     }
 
     /**
-     * Initiate the given requests and check for any new responses, waiting up to the specified time. Requests can only
-     * be sent for ready nodes.
-     * @param requests The requests to initiate
+     * Return the state of the connection to the given node
+     * 
+     * @param node The node to check
+     * @return The connection state
+     */
+    public ConnectionState connectionState(int node) {
+        return connectionStates.connectionState(node);
+    }
+
+    /**
+     * Queue up the given request for sending. Requests can only be sent out to ready nodes.
+     * 
+     * @param request The request
+     * @param now The current time
+     */
+    @Override
+    public void send(ClientRequest request) {
+        int nodeId = request.request().destination();
+        if (!isSendable(nodeId))
+            throw new IllegalStateException("Attempt to send a request to node " + nodeId + " which is not ready.");
+
+        this.inFlightRequests.add(request);
+        selector.send(request.request());
+    }
+
+    /**
+     * Do actual reads and writes to sockets.
+     * 
      * @param timeout The maximum amount of time to wait (in ms) for responses if there are none immediately
      * @param now The current time in milliseconds
      * @return The list of responses received
      */
     @Override
-    public List<ClientResponse> poll(List<ClientRequest> requests, long timeout, long now) {
-        List<NetworkSend> sends = new ArrayList<NetworkSend>();
-
-        for (int i = 0; i < requests.size(); i++) {
-            ClientRequest request = requests.get(i);
-            int nodeId = request.request().destination();
-            if (!isSendable(nodeId))
-                throw new IllegalStateException("Attempt to send a request to node " + nodeId + " which is not ready.");
-
-            this.inFlightRequests.add(request);
-            sends.add(request.request());
-        }
-
+    public List<ClientResponse> poll(long timeout, long now) {
         // should we update our metadata?
         long timeToNextMetadataUpdate = metadata.timeToNextUpdate(now);
         long timeToNextReconnectAttempt = Math.max(this.lastNoNodeAvailableMs + metadata.refreshBackoff() - now, 0);
         long waitForMetadataFetch = (this.metadataFetchInProgress ? Integer.MAX_VALUE : 0);
         // if there is no node available to connect, back off refreshing metadata
-        long metadataTimeout = Math.max(Math.max(timeToNextMetadataUpdate, timeToNextReconnectAttempt), waitForMetadataFetch);
+        long metadataTimeout = Math.max(Math.max(timeToNextMetadataUpdate, timeToNextReconnectAttempt),
+                                        waitForMetadataFetch);
         if (!this.metadataFetchInProgress && metadataTimeout == 0)
-            maybeUpdateMetadata(sends, now);
-
+            maybeUpdateMetadata(now);
         // do the I/O
         try {
-            this.selector.poll(Math.min(timeout, metadataTimeout), sends);
+            this.selector.poll(Math.min(timeout, metadataTimeout));
         } catch (IOException e) {
             log.error("Unexpected error during I/O in producer network thread", e);
         }
 
+        // process completed actions
         List<ClientResponse> responses = new ArrayList<ClientResponse>();
         handleCompletedSends(responses, now);
         handleCompletedReceives(responses, now);
         handleDisconnections(responses, now);
         handleConnections();
 
+        // invoke callbacks
+        for (ClientResponse response : responses) {
+            if (response.request().hasCallback()) {
+                try {
+                    response.request().callback().onComplete(response);
+                } catch (Exception e) {
+                    log.error("Uncaught error in request completion:", e);
+                }
+            }
+        }
+
+        return responses;
+    }
+
+    /**
+     * Await all the outstanding responses for requests on the given connection
+     * 
+     * @param node The node to block on
+     * @param now The current time in ms
+     * @return All the collected responses
+     */
+    @Override
+    public List<ClientResponse> completeAll(int node, long now) {
+        try {
+            this.selector.muteAll();
+            this.selector.unmute(node);
+            List<ClientResponse> responses = new ArrayList<ClientResponse>();
+            while (inFlightRequestCount(node) > 0)
+                responses.addAll(poll(Integer.MAX_VALUE, now));
+            return responses;
+        } finally {
+            this.selector.unmuteAll();
+        }
+    }
+
+    /**
+     * Wait for all outstanding requests to complete.
+     */
+    @Override
+    public List<ClientResponse> completeAll(long now) {
+        List<ClientResponse> responses = new ArrayList<ClientResponse>();
+        while (inFlightRequestCount() > 0)
+            responses.addAll(poll(Integer.MAX_VALUE, now));
         return responses;
     }
 
@@ -212,7 +274,16 @@ public class NetworkClient implements KafkaClient {
     }
 
     /**
+     * Get the number of in-flight requests for a given node
+     */
+    @Override
+    public int inFlightRequestCount(int nodeId) {
+        return this.inFlightRequests.inFlightRequestCount(nodeId);
+    }
+
+    /**
      * Generate a request header for the given API key
+     * 
      * @param key The api key
      * @return A request header with the appropriate client id and correlation id
      */
@@ -242,6 +313,7 @@ public class NetworkClient implements KafkaClient {
      * prefer a node with an existing connection, but will potentially choose a node for which we don't yet have a
      * connection if all existing connections are in use. This method will never choose a node for which there is no
      * existing connection and from which we have disconnected within the reconnect backoff period.
+     * 
      * @return The node with the fewest in-flight requests.
      */
     public Node leastLoadedNode(long now) {
@@ -261,12 +333,12 @@ public class NetworkClient implements KafkaClient {
                 found = node;
             }
         }
-
         return found;
     }
 
     /**
      * Handle any completed request send. In particular if no response is expected consider the request complete.
+     * 
      * @param responses The list of responses to update
      * @param now The current time
      */
@@ -283,6 +355,7 @@ public class NetworkClient implements KafkaClient {
 
     /**
      * Handle any completed receives and update the response list with the responses received.
+     * 
      * @param responses The list of responses to update
      * @param now The current time
      */
@@ -317,6 +390,7 @@ public class NetworkClient implements KafkaClient {
 
     /**
      * Handle any disconnected connections
+     * 
      * @param responses The list of responses that completed with the disconnection
      * @param now The current time
      */
@@ -353,10 +427,8 @@ public class NetworkClient implements KafkaClient {
      */
     private void correlate(RequestHeader requestHeader, ResponseHeader responseHeader) {
         if (requestHeader.correlationId() != responseHeader.correlationId())
-            throw new IllegalStateException("Correlation id for response (" + responseHeader.correlationId() +
-                                            ") does not match request (" +
-                                            requestHeader.correlationId() +
-                                            ")");
+            throw new IllegalStateException("Correlation id for response (" + responseHeader.correlationId()
+                    + ") does not match request (" + requestHeader.correlationId() + ")");
     }
 
     /**
@@ -371,7 +443,7 @@ public class NetworkClient implements KafkaClient {
     /**
      * Add a metadata request to the list of sends if we can make one
      */
-    private void maybeUpdateMetadata(List<NetworkSend> sends, long now) {
+    private void maybeUpdateMetadata(long now) {
         // Beware that the behavior of this method and the computation of timeouts for poll() are
         // highly dependent on the behavior of leastLoadedNode.
         Node node = this.leastLoadedNode(now);
@@ -382,17 +454,16 @@ public class NetworkClient implements KafkaClient {
             return;
         }
 
-        log.debug("Trying to send metadata request to node {}", node.id());
         if (connectionStates.isConnected(node.id()) && inFlightRequests.canSendMore(node.id())) {
             Set<String> topics = metadata.topics();
             this.metadataFetchInProgress = true;
             ClientRequest metadataRequest = metadataRequest(now, node.id(), topics);
             log.debug("Sending metadata request {} to node {}", metadataRequest, node.id());
-            sends.add(metadataRequest.request());
+            this.selector.send(metadataRequest.request());
             this.inFlightRequests.add(metadataRequest);
         } else if (connectionStates.canConnect(node.id(), now)) {
             // we don't have a connection to this node right now, make one
-            log.debug("Init connection to node {} for sending metadata request in the next iteration", node.id());
+            log.debug("Initialize connection to node {} for sending metadata request", node.id());
             initiateConnect(node, now);
             // If initiateConnect failed immediately, this node will be put into blackout and we
             // should allow immediately retrying in case there is another candidate node. If it
@@ -412,7 +483,10 @@ public class NetworkClient implements KafkaClient {
         try {
             log.debug("Initiating connection to node {} at {}:{}.", node.id(), node.host(), node.port());
             this.connectionStates.connecting(node.id(), now);
-            selector.connect(node.id(), new InetSocketAddress(node.host(), node.port()), this.socketSendBuffer, this.socketReceiveBuffer);
+            selector.connect(node.id(),
+                             new InetSocketAddress(node.host(), node.port()),
+                             this.socketSendBuffer,
+                             this.socketReceiveBuffer);
         } catch (IOException e) {
             /* attempt failed, we'll try again after the backoff */
             connectionStates.disconnected(node.id());
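
For callers that need synchronous behavior, a sketch of the new blocking helpers (node id 0 is a
placeholder):

    // drain every in-flight request to node 0, muting the other connections meanwhile
    List<ClientResponse> nodeResponses = client.completeAll(0, System.currentTimeMillis());
    // or drain everything outstanding
    List<ClientResponse> all = client.completeAll(System.currentTimeMillis());

Note that by the time either call returns, the callback loop added to poll() above has already
invoked onComplete() for each response.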

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/NodeConnectionState.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/NodeConnectionState.java b/clients/src/main/java/org/apache/kafka/clients/NodeConnectionState.java
deleted file mode 100644
index 752a979..0000000
--- a/clients/src/main/java/org/apache/kafka/clients/NodeConnectionState.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
- * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-package org.apache.kafka.clients;
-
-/**
- * The state of our connection to a node
- */
-final class NodeConnectionState {
-
-    ConnectionState state;
-    long lastConnectAttemptMs;
-
-    public NodeConnectionState(ConnectionState state, long lastConnectAttempt) {
-        this.state = state;
-        this.lastConnectAttemptMs = lastConnectAttempt;
-    }
-
-    public String toString() {
-        return "NodeState(" + state + ", " + lastConnectAttemptMs + ")";
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/RequestCompletionHandler.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/RequestCompletionHandler.java b/clients/src/main/java/org/apache/kafka/clients/RequestCompletionHandler.java
new file mode 100644
index 0000000..6fee4e4
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/RequestCompletionHandler.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package org.apache.kafka.clients;
+
+/**
+ * A callback interface for attaching an action to be executed when a request is complete and the corresponding response
+ * has been received. This handler will also be invoked if there is a disconnection while handling the request.
+ */
+public interface RequestCompletionHandler {
+
+    public void onComplete(ClientResponse response);
+
+}
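
Because the handler also fires on disconnects (per the javadoc above), implementations generally
branch on the outcome. A sketch, assuming ClientResponse exposes wasDisconnected() and
responseBody() accessors:

    RequestCompletionHandler handler = new RequestCompletionHandler() {
        @Override
        public void onComplete(ClientResponse response) {
            if (response.wasDisconnected()) {
                // the connection died before a response arrived; e.g. schedule a retry
            } else {
                // parse response.responseBody() here
            }
        }
    };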

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/consumer/CommitType.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/CommitType.java b/clients/src/main/java/org/apache/kafka/clients/consumer/CommitType.java
new file mode 100644
index 0000000..072cc2e
--- /dev/null
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/CommitType.java
@@ -0,0 +1,5 @@
+package org.apache.kafka.clients.consumer;
+
+public enum CommitType {
+    SYNC, ASYNC
+}
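
This enum replaces the old boolean sync flag on commit(); a sketch of the resulting call sites,
with `consumer` an assumed consumer instance:

    consumer.commit(CommitType.SYNC);   // block until the commit completes
    consumer.commit(CommitType.ASYNC);  // fire and forget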

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
index c0c636b..8f587bc 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
@@ -9,14 +9,16 @@
  * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
  * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
  * specific language governing permissions and limitations under the License.
-*/
+ */
 package org.apache.kafka.clients.consumer;
 
 import java.io.Closeable;
-import java.util.Collection;
+import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.kafka.common.Metric;
+import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.MetricName;
 
@@ -24,102 +26,85 @@ import org.apache.kafka.common.MetricName;
  * @see KafkaConsumer
  * @see MockConsumer
  */
-public interface Consumer<K,V> extends Closeable {
+public interface Consumer<K, V> extends Closeable {
+    
+    /**
+     * @see KafkaConsumer#subscriptions()
+     */
+    public Set<TopicPartition> subscriptions();
 
     /**
-     * Incrementally subscribe to the given list of topics. This API is mutually exclusive to 
-     * {@link #subscribe(TopicPartition...) subscribe(partitions)} 
-     * @param topics A variable list of topics that the consumer subscribes to
-     */ 
-    public void subscribe(String...topics);
+     * @see KafkaConsumer#subscribe(String...)
+     */
+    public void subscribe(String... topics);
 
     /**
-     * Incrementally subscribes to a specific topic and partition. This API is mutually exclusive to 
-     * {@link #subscribe(String...) subscribe(topics)}
-     * @param partitions Partitions to subscribe to
-     */ 
+     * @see KafkaConsumer#subscribe(TopicPartition...)
+     */
     public void subscribe(TopicPartition... partitions);
 
     /**
-     * Unsubscribe from the specific topics. Messages for this topic will not be returned from the next {@link #poll(long) poll()}
-     * onwards. This should be used in conjunction with {@link #subscribe(String...) subscribe(topics)}. It is an error to
-     * unsubscribe from a topic that was never subscribed to using {@link #subscribe(String...) subscribe(topics)} 
-     * @param topics Topics to unsubscribe from
+     * @see KafkaConsumer#unsubscribe(String...)
      */
     public void unsubscribe(String... topics);
 
     /**
-     * Unsubscribe from the specific topic partitions. Messages for these partitions will not be returned from the next 
-     * {@link #poll(long) poll()} onwards. This should be used in conjunction with 
-     * {@link #subscribe(TopicPartition...) subscribe(topic, partitions)}. It is an error to
-     * unsubscribe from a partition that was never subscribed to using {@link #subscribe(TopicPartition...) subscribe(partitions)}
-     * @param partitions Partitions to unsubscribe from
+     * @see KafkaConsumer#unsubscribe(TopicPartition...)
      */
     public void unsubscribe(TopicPartition... partitions);
-    
+
     /**
-     * Fetches data for the subscribed list of topics and partitions
-     * @param timeout  The time, in milliseconds, spent waiting in poll if data is not available. If 0, waits indefinitely. Must not be negative
-     * @return Map of topic to records for the subscribed topics and partitions as soon as data is available for a topic partition. Availability
-     *         of data is controlled by {@link ConsumerConfig#FETCH_MIN_BYTES_CONFIG} and {@link ConsumerConfig#FETCH_MAX_WAIT_MS_CONFIG}.
-     *         If no data is available for timeout ms, returns an empty list
+     * @see KafkaConsumer#poll(long)
      */
-    public Map<String, ConsumerRecords<K,V>> poll(long timeout);
+    public ConsumerRecords<K, V> poll(long timeout);
 
     /**
-     * Commits offsets returned on the last {@link #poll(long) poll()} for the subscribed list of topics and partitions.
-     * @param sync If true, the commit should block until the consumer receives an acknowledgment 
-     * @return An {@link OffsetMetadata} object that contains the partition, offset and a corresponding error code. Returns null
-     * if the sync flag is set to false 
+     * @see KafkaConsumer#commit(CommitType)
      */
-    public OffsetMetadata commit(boolean sync);
+    public void commit(CommitType commitType);
 
     /**
-     * Commits the specified offsets for the specified list of topics and partitions to Kafka.
-     * @param offsets The map of offsets to commit for the given topic partitions
-     * @param sync If true, commit will block until the consumer receives an acknowledgment 
-     * @return An {@link OffsetMetadata} object that contains the partition, offset and a corresponding error code. Returns null
-     * if the sync flag is set to false. 
+     * @see KafkaConsumer#commit(Map, CommitType)
      */
-    public OffsetMetadata commit(Map<TopicPartition, Long> offsets, boolean sync);
-    
+    public void commit(Map<TopicPartition, Long> offsets, CommitType commitType);
+
     /**
-     * Overrides the fetch positions that the consumer will use on the next fetch request. If the consumer subscribes to a list of topics
-     * using {@link #subscribe(String...) subscribe(topics)}, an exception will be thrown if the specified topic partition is not owned by
-     * the consumer.  
-     * @param offsets The map of fetch positions per topic and partition
+     * @see KafkaConsumer#seek(TopicPartition, long)
      */
-    public void seek(Map<TopicPartition, Long> offsets);
+    public void seek(TopicPartition partition, long offset);
 
     /**
-     * Returns the fetch position of the <i>next message</i> for the specified topic partition to be used on the next {@link #poll(long) poll()}
-     * @param partitions Partitions for which the fetch position will be returned
-     * @return The position from which data will be fetched for the specified partition on the next {@link #poll(long) poll()}
+     * @see KafkaConsumer#seekToBeginning(TopicPartition...)
      */
-    public Map<TopicPartition, Long> position(Collection<TopicPartition> partitions);
-    
+    public void seekToBeginning(TopicPartition... partitions);
+
     /**
-     * Fetches the last committed offsets for the input list of partitions 
-     * @param partitions The list of partitions to return the last committed offset for
-     * @return  The list of offsets for the specified list of partitions
+     * @see KafkaConsumer#seekToEnd(TopicPartition...)
      */
-    public Map<TopicPartition, Long> committed(Collection<TopicPartition> partitions);
-    
+    public void seekToEnd(TopicPartition... partitions);
+
+    /**
+     * @see KafkaConsumer#position(TopicPartition)
+     */
+    public long position(TopicPartition partition);
+
     /**
-     * Fetches offsets before a certain timestamp
-     * @param timestamp The unix timestamp. Value -1 indicates earliest available timestamp. Value -2 indicates latest available timestamp. 
-     * @param partitions The list of partitions for which the offsets are returned
-     * @return The offsets for messages that were written to the server before the specified timestamp.
+     * @see KafkaConsumer#committed(TopicPartition)
      */
-    public Map<TopicPartition, Long> offsetsBeforeTime(long timestamp, Collection<TopicPartition> partitions);
+    public long committed(TopicPartition partition);
 
     /**
-     * Return a map of metrics maintained by the consumer
+     * @see KafkaConsumer#metrics()
      */
     public Map<MetricName, ? extends Metric> metrics();
 
     /**
-     * Close this consumer
+     * @see KafkaConsumer#partitionsFor(String)
+     */
+    public List<PartitionInfo> partitionsFor(String topic);
+
+    /**
+     * @see KafkaConsumer#close()
      */
     public void close();
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
index 57c1807..6d4ff7c 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
@@ -9,13 +9,16 @@
  * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
  * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
  * specific language governing permissions and limitations under the License.
-*/
+ */
 package org.apache.kafka.clients.consumer;
 
 import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+import static org.apache.kafka.common.config.ConfigDef.ValidString.in;
 
 import java.util.Map;
 
+import org.apache.kafka.clients.CommonClientConfigs;
+import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceCallback;
 import org.apache.kafka.common.config.AbstractConfig;
 import org.apache.kafka.common.config.ConfigDef;
 import org.apache.kafka.common.config.ConfigDef.Importance;
@@ -27,130 +30,121 @@ import org.apache.kafka.common.config.ConfigDef.Type;
 public class ConsumerConfig extends AbstractConfig {
     private static final ConfigDef config;
 
-    /**
-     * The identifier of the group this consumer belongs to. This is required if the consumer uses either the
-     * group management functionality by using {@link Consumer#subscribe(String...) subscribe(topics)}. This is also required
-     * if the consumer uses the default Kafka based offset management strategy. 
+    /*
+     * NOTE: DO NOT CHANGE EITHER THE CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES, AS
+     * THESE ARE PART OF THE PUBLIC API AND CHANGES WILL BREAK USER CODE.
      */
-    public static final String GROUP_ID_CONFIG = "group.id";
-    
+
     /**
-     * The timeout after which, if the {@link Consumer#poll(long) poll(timeout)} is not invoked, the consumer is
-     * marked dead and a rebalance operation is triggered for the group identified by {@link #GROUP_ID_CONFIG}. Relevant 
-     * if the consumer uses the group management functionality by invoking {@link Consumer#subscribe(String...) subscribe(topics)} 
+     * <code>group.id</code>
      */
-    public static final String SESSION_TIMEOUT_MS = "session.timeout.ms";
+    public static final String GROUP_ID_CONFIG = "group.id";
+    private static final String GROUP_ID_DOC = "A unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality by using <code>subscribe(topic)</code> or the Kafka-based offset management strategy.";
 
     /**
-     * The number of times a consumer sends a heartbeat to the co-ordinator broker within a {@link #SESSION_TIMEOUT_MS} time window.
-     * This frequency affects the latency of a rebalance operation since the co-ordinator broker notifies a consumer of a rebalance 
-     * in the heartbeat response. Relevant if the consumer uses the group management functionality by invoking 
-     * {@link Consumer#subscribe(String...) subscribe(topics)} 
+     * <code>session.timeout.ms</code>
      */
-    public static final String HEARTBEAT_FREQUENCY = "heartbeat.frequency";
+    public static final String SESSION_TIMEOUT_MS_CONFIG = "session.timeout.ms";
+    private static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to detect failures when using Kafka's group management facilities.";
 
     /**
-     * A list of URLs to use for establishing the initial connection to the cluster. This list should be in the form
-     * <code>host1:port1,host2:port2,...</code>. These urls are just used for the initial connection to discover the
-     * full cluster membership (which may change dynamically) so this list need not contain the full set of servers (you
-     * may want more than one, though, in case a server is down).
+     * <code>bootstrap.servers</code>
      */
-    public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";
+    public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG;
 
     /**
-     * If true, periodically commit to Kafka the offsets of messages already returned by the consumer. This committed 
-     * offset will be used when the process fails as the position from which the consumption will begin.
+     * <code>enable.auto.commit</code>
      */
     public static final String ENABLE_AUTO_COMMIT_CONFIG = "enable.auto.commit";
-    
+    private static final String ENABLE_AUTO_COMMIT_DOC = "If true, the consumer's offset will be periodically committed in the background.";
+
     /**
-     * The friendly name of the partition assignment strategy that the server will use to distribute partition ownership
-     * amongst consumer instances when group management is used
+     * <code>auto.commit.interval.ms</code>
      */
-    public static final String PARTITION_ASSIGNMENT_STRATEGY = "partition.assignment.strategy";
-    
+    public static final String AUTO_COMMIT_INTERVAL_MS_CONFIG = "auto.commit.interval.ms";
+    private static final String AUTO_COMMIT_INTERVAL_MS_DOC = "The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if <code>enable.auto.commit</code> is set to <code>true</code>.";
+
     /**
-     * The frequency in milliseconds that the consumer offsets are committed to Kafka. Relevant if {@link #ENABLE_AUTO_COMMIT_CONFIG}
-     * is turned on.
+     * <code>partition.assignment.strategy</code>
      */
-    public static final String AUTO_COMMIT_INTERVAL_MS_CONFIG = "auto.commit.interval.ms";
-    
+    public static final String PARTITION_ASSIGNMENT_STRATEGY_CONFIG = "partition.assignment.strategy";
+    private static final String PARTITION_ASSIGNMENT_STRATEGY_DOC = "The friendly name of the partition assignment strategy that the server will use to distribute partition ownership amongst consumer instances when group management is used.";
+
     /**
-     * What to do when there is no initial offset in Kafka or if an offset is out of range:
-     * <ul>
-     * <li> smallest:      automatically reset the offset to the smallest offset
-     * <li> largest:       automatically reset the offset to the largest offset
-     * <li> disable:       throw exception to the consumer if no previous offset is found for the consumer's group
-     * <li> anything else: throw exception to the consumer. 
-     * </ul> 
+     * <code>auto.offset.reset</code>
      */
     public static final String AUTO_OFFSET_RESET_CONFIG = "auto.offset.reset";
-    
+    private static final String AUTO_OFFSET_RESET_DOC = "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. because that data has been deleted): <ul><li>smallest: automatically reset the offset to the smallest offset</li><li>largest: automatically reset the offset to the largest offset</li><li>none: throw exception to the consumer if no previous offset is found for the consumer's group</li><li>anything else: throw exception to the consumer.</li></ul>";
+
     /**
-     * The minimum amount of data the server should return for a fetch request. If insufficient data is available the 
-     * request will wait for that much data to accumulate before answering the request.
+     * <code>fetch.min.bytes</code>
      */
     public static final String FETCH_MIN_BYTES_CONFIG = "fetch.min.bytes";
-    
+    private static final String FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of 1 byte means that fetch requests are answered as soon as a single byte of data is available or the fetch request times out waiting for data to arrive. Setting this to something greater than 1 will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency.";
+
     /**
-     * The maximum amount of time the server will block before answering the fetch request if there isn't sufficient 
-     * data to immediately satisfy {@link #FETCH_MIN_BYTES_CONFIG}. This should be less than or equal to the timeout used in 
-     * {@link KafkaConsumer#poll(long) poll(timeout)}
+     * <code>fetch.max.wait.ms</code>
      */
     public static final String FETCH_MAX_WAIT_MS_CONFIG = "fetch.max.wait.ms";
-    
+    private static final String FETCH_MAX_WAIT_MS_DOC = "The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes.";
+
+    /** <code>metadata.max.age.ms</code> */
+    public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG;
+
     /**
-     * The maximum amount of time to block waiting to fetch metadata about a topic the first time a record is received 
-     * from that topic. The consumer will throw a TimeoutException if it could not successfully fetch metadata within
-     * this timeout.
+     * <code>max.partition.fetch.bytes</code>
      */
-    public static final String METADATA_FETCH_TIMEOUT_CONFIG = "metadata.fetch.timeout.ms";
+    public static final String MAX_PARTITION_FETCH_BYTES_CONFIG = "max.partition.fetch.bytes";
+    private static final String MAX_PARTITION_FETCH_BYTES_DOC = "The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be <code>#partitions * max.partition.fetch.bytes</code>. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition.";
+
+    /** <code>send.buffer.bytes</code> */
+    public static final String SEND_BUFFER_CONFIG = CommonClientConfigs.SEND_BUFFER_CONFIG;
+
+    /** <code>receive.buffer.bytes</code> */
+    public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG;
 
     /**
-     * The total memory used by the consumer to buffer records received from the server. This config is meant to control
-     * the consumer's memory usage, so it is the size of the global fetch buffer that will be shared across all partitions. 
+     * <code>client.id</code>
      */
-    public static final String TOTAL_BUFFER_MEMORY_CONFIG = "total.memory.bytes";
+    public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG;
 
     /**
-     * The minimum amount of memory that should be used to fetch at least one message for a partition. This puts a lower
-     * bound on the consumer's memory utilization when there is at least one message for a partition available on the server.
-     * This size must be at least as large as the maximum message size the server allows or else it is possible for the producer 
-     * to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large 
-     * message on a certain partition. 
+     * <code>reconnect.backoff.ms</code>
      */
-    public static final String FETCH_BUFFER_CONFIG = "fetch.buffer.bytes";
-    
+    public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG;
+
     /**
-     * The id string to pass to the server when making requests. The purpose of this is to be able to track the source
-     * of requests beyond just ip/port by allowing a logical application name to be included.
+     * <code>retry.backoff.ms</code>
      */
-    public static final String CLIENT_ID_CONFIG = "client.id";
+    public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG;
 
     /**
-     * The size of the TCP send buffer to use when fetching data
+     * <code>metrics.sample.window.ms</code>
      */
-    public static final String SOCKET_RECEIVE_BUFFER_CONFIG = "socket.receive.buffer.bytes";
+    public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG;
 
     /**
-     * The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a
-     * host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.
+     * <code>metrics.num.samples</code>
      */
-    public static final String RECONNECT_BACKOFF_MS_CONFIG = "reconnect.backoff.ms";
+    public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG;
 
-    /** <code>metrics.sample.window.ms</code> */
-    public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms";
-    private static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The metrics system maintains a configurable number of samples over a fixed window size. This configuration " + "controls the size of the window. For example we might maintain two samples each measured over a 30 second period. "
-                                                               + "When a window expires we erase and overwrite the oldest window.";
-
-    /** <code>metrics.num.samples</code> */
-    public static final String METRICS_NUM_SAMPLES_CONFIG = "metrics.num.samples";
-    private static final String METRICS_NUM_SAMPLES_DOC = "The number of samples maintained to compute metrics.";
+    /**
+     * <code>metric.reporters</code>
+     */
+    public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG;
 
-    /** <code>metric.reporters</code> */
-    public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters";
-    private static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the <code>MetricReporter</code> interface allows " + "plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.";
+    /**
+     * <code>rebalance.callback.class</code>
+     */
+    public static final String CONSUMER_REBALANCE_CALLBACK_CLASS_CONFIG = "rebalance.callback.class";
+    private static final String CONSUMER_REBALANCE_CALLBACK_CLASS_DOC = "A user-provided callback to execute when partition assignments change.";
 
+    /**
+     * <code>check.crcs</code>
+     */
+    public static final String CHECK_CRCS_CONFIG = "check.crcs";
+    private static final String CHECK_CRCS_DOC = "Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.";
+    
     /** <code>key.deserializer</code> */
     public static final String KEY_DESERIALIZER_CLASS_CONFIG = "key.deserializer";
     private static final String KEY_DESERIALIZER_CLASS_DOC = "Deserializer class for key that implements the <code>Deserializer</code> interface.";
@@ -160,38 +154,134 @@ public class ConsumerConfig extends AbstractConfig {
     private static final String VALUE_DESERIALIZER_CLASS_DOC = "Deserializer class for value that implements the <code>Deserializer</code> interface.";
 
     static {
-        /* TODO: add config docs */
-        config = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Importance.HIGH, "blah blah")
-                                .define(GROUP_ID_CONFIG, Type.STRING, Importance.HIGH, "blah blah")
-                                .define(SESSION_TIMEOUT_MS, Type.LONG, 1000, Importance.HIGH, "blah blah")
-                                .define(HEARTBEAT_FREQUENCY, Type.INT, 3, Importance.MEDIUM, "blah blah")
-                                .define(PARTITION_ASSIGNMENT_STRATEGY, Type.STRING, Importance.MEDIUM, "blah blah")
-                                .define(METADATA_FETCH_TIMEOUT_CONFIG, Type.LONG, 60 * 1000, atLeast(0), Importance.MEDIUM, "blah blah")
-                                .define(ENABLE_AUTO_COMMIT_CONFIG, Type.BOOLEAN, true, Importance.MEDIUM, "blah blah")
-                                .define(AUTO_COMMIT_INTERVAL_MS_CONFIG, Type.LONG, 5000, atLeast(0), Importance.LOW, "blah blah")
-                                .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.LOW, "blah blah")
-                                .define(TOTAL_BUFFER_MEMORY_CONFIG, Type.LONG, 32 * 1024 * 1024L, atLeast(0L), Importance.LOW, "blah blah")
-                                .define(FETCH_BUFFER_CONFIG, Type.INT, 1 * 1024 * 1024, atLeast(0), Importance.HIGH, "blah blah")
-                                .define(SOCKET_RECEIVE_BUFFER_CONFIG, Type.INT, 128 * 1024, atLeast(0), Importance.LOW, "blah blah")
-                                .define(FETCH_MIN_BYTES_CONFIG, Type.LONG, 1024, atLeast(0), Importance.HIGH, "blah blah")
-                                .define(FETCH_MAX_WAIT_MS_CONFIG, Type.LONG, 500, atLeast(0), Importance.LOW, "blah blah")
-                                .define(RECONNECT_BACKOFF_MS_CONFIG, Type.LONG, 10L, atLeast(0L), Importance.LOW, "blah blah")
-                                .define(AUTO_OFFSET_RESET_CONFIG, Type.STRING, "largest", Importance.MEDIUM, "blah blah")
+        config = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG,
+                                        Type.LIST,
+                                        Importance.HIGH,
+                                        CommonClientConfigs.BOOSTRAP_SERVERS_DOC)
+                                .define(GROUP_ID_CONFIG, Type.STRING, "", Importance.HIGH, GROUP_ID_DOC)
+                                .define(SESSION_TIMEOUT_MS_CONFIG,
+                                        Type.LONG,
+                                        30000,
+                                        Importance.HIGH,
+                                        SESSION_TIMEOUT_MS_DOC)
+                                .define(PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
+                                        Type.STRING,
+                                        "blah",
+                                        Importance.MEDIUM,
+                                        PARTITION_ASSIGNMENT_STRATEGY_DOC)
+                                .define(METADATA_MAX_AGE_CONFIG,
+                                        Type.LONG,
+                                        5 * 60 * 1000,
+                                        atLeast(0),
+                                        Importance.LOW,
+                                        CommonClientConfigs.METADATA_MAX_AGE_DOC)
+                                .define(ENABLE_AUTO_COMMIT_CONFIG,
+                                        Type.BOOLEAN,
+                                        true,
+                                        Importance.MEDIUM,
+                                        ENABLE_AUTO_COMMIT_DOC)
+                                .define(AUTO_COMMIT_INTERVAL_MS_CONFIG,
+                                        Type.LONG,
+                                        5000,
+                                        atLeast(0),
+                                        Importance.LOW,
+                                        AUTO_COMMIT_INTERVAL_MS_DOC)
+                                .define(CLIENT_ID_CONFIG,
+                                        Type.STRING,
+                                        "",
+                                        Importance.LOW,
+                                        CommonClientConfigs.CLIENT_ID_DOC)
+                                .define(MAX_PARTITION_FETCH_BYTES_CONFIG,
+                                        Type.INT,
+                                        1 * 1024 * 1024,
+                                        atLeast(0),
+                                        Importance.HIGH,
+                                        MAX_PARTITION_FETCH_BYTES_DOC)
+                                .define(SEND_BUFFER_CONFIG,
+                                        Type.INT,
+                                        128 * 1024,
+                                        atLeast(0),
+                                        Importance.MEDIUM,
+                                        CommonClientConfigs.SEND_BUFFER_DOC)
+                                .define(RECEIVE_BUFFER_CONFIG,
+                                        Type.INT,
+                                        32 * 1024,
+                                        atLeast(0),
+                                        Importance.MEDIUM,
+                                        CommonClientConfigs.RECEIVE_BUFFER_DOC)
+                                .define(FETCH_MIN_BYTES_CONFIG,
+                                        Type.INT,
+                                        1024,
+                                        atLeast(0),
+                                        Importance.HIGH,
+                                        FETCH_MIN_BYTES_DOC)
+                                .define(FETCH_MAX_WAIT_MS_CONFIG,
+                                        Type.INT,
+                                        500,
+                                        atLeast(0),
+                                        Importance.LOW,
+                                        FETCH_MAX_WAIT_MS_DOC)
+                                .define(RECONNECT_BACKOFF_MS_CONFIG,
+                                        Type.LONG,
+                                        50L,
+                                        atLeast(0L),
+                                        Importance.LOW,
+                                        CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC)
+                                .define(RETRY_BACKOFF_MS_CONFIG,
+                                        Type.LONG,
+                                        100L,
+                                        atLeast(0L),
+                                        Importance.LOW,
+                                        CommonClientConfigs.RETRY_BACKOFF_MS_DOC)
+                                .define(AUTO_OFFSET_RESET_CONFIG,
+                                        Type.STRING,
+                                        "latest",
+                                        in("latest", "earliest", "none"),
+                                        Importance.MEDIUM,
+                                        AUTO_OFFSET_RESET_DOC)
+                                .define(CONSUMER_REBALANCE_CALLBACK_CLASS_CONFIG,
+                                        Type.CLASS,
+                                        NoOpConsumerRebalanceCallback.class,
+                                        Importance.LOW,
+                                        CONSUMER_REBALANCE_CALLBACK_CLASS_DOC)
+                                .define(CHECK_CRCS_CONFIG,
+                                        Type.BOOLEAN,
+                                        true,
+                                        Importance.LOW,
+                                        CHECK_CRCS_DOC)                                
                                 .define(METRICS_SAMPLE_WINDOW_MS_CONFIG,
                                         Type.LONG,
                                         30000,
                                         atLeast(0),
                                         Importance.LOW,
-                                        METRICS_SAMPLE_WINDOW_MS_DOC)
-                                .define(METRICS_NUM_SAMPLES_CONFIG, Type.INT, 2, atLeast(1), Importance.LOW, METRICS_NUM_SAMPLES_DOC)
-                                .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, "", Importance.LOW, METRIC_REPORTER_CLASSES_DOC)
-                                .define(KEY_DESERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_DESERIALIZER_CLASS_DOC)
-                                .define(VALUE_DESERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_DESERIALIZER_CLASS_DOC);
-
+                                        CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC)
+                                .define(METRICS_NUM_SAMPLES_CONFIG,
+                                        Type.INT,
+                                        2,
+                                        atLeast(1),
+                                        Importance.LOW,
+                                        CommonClientConfigs.METRICS_NUM_SAMPLES_DOC)
+                                .define(METRIC_REPORTER_CLASSES_CONFIG,
+                                        Type.LIST,
+                                        "",
+                                        Importance.LOW,
+                                        CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC)
+                                .define(KEY_DESERIALIZER_CLASS_CONFIG,
+                                        Type.CLASS,
+                                        Importance.HIGH,
+                                        KEY_DESERIALIZER_CLASS_DOC)
+                                .define(VALUE_DESERIALIZER_CLASS_CONFIG,
+                                        Type.CLASS,
+                                        Importance.HIGH,
+                                        VALUE_DESERIALIZER_CLASS_DOC);
     }
 
     ConsumerConfig(Map<? extends Object, ? extends Object> props) {
         super(config, props);
     }
 
+    public static void main(String[] args) {
+        System.out.println(config.toHtmlTable());
+    }
+
 }

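For context, a minimal sketch (not part of this patch) of how the consumer configs defined above wire together. It assumes the new KafkaConsumer accepts a java.util.Properties object, that ConsumerRecords is iterable, and that StringDeserializer lives in org.apache.kafka.common.serialization; the broker address, group id, and topic name are made-up values.

    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class ConsumerConfigExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");   // made-up broker list
            props.put("group.id", "page-view-counter");         // made-up group id
            props.put("auto.offset.reset", "earliest");         // must be latest/earliest/none per the validator above
            props.put("fetch.min.bytes", "1024");               // the default defined above
            props.put("max.partition.fetch.bytes", "2097152");  // 2 MB; total request memory is #partitions * this
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
            consumer.subscribe("page-views");                   // made-up topic
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.println(record);
            consumer.close();
        }
    }
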
http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.java
index e4cf7d1..74dfdba 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.java
@@ -9,7 +9,7 @@
  * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
  * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
  * specific language governing permissions and limitations under the License.
-*/
+ */
 package org.apache.kafka.clients.consumer;
 
 import java.util.Collection;
@@ -17,34 +17,77 @@ import java.util.Collection;
 import org.apache.kafka.common.TopicPartition;
 
 /**
- * A callback interface that the user can implement to manage customized offsets on the start and end of 
- * every rebalance operation. This callback will execute in the user thread as part of the 
- * {@link Consumer#poll(long) poll(long)} API on every rebalance attempt.
- * Default implementation of the callback will {@link Consumer#seek(java.util.Map) seek(offsets)} to the last committed offsets in the
- * {@link #onPartitionsAssigned(Consumer, Collection) onPartitionsAssigned()} callback. And will commit offsets synchronously
- * for the specified list of partitions to Kafka in the {@link #onPartitionsRevoked(Consumer, Collection) onPartitionsRevoked()}
- * callback.
+ * A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the
+ * consumer changes.
+ * <p>
+ * This is applicable when the consumer has Kafka auto-manage group membership; if the consumer directly subscribes to partitions,
+ * those partitions will never be reassigned and this callback is not applicable.
+ * <p>
+ * When Kafka is managing the group membership, a partition re-assignment will be triggered any time the members of the group change or the subscription
+ * of the members changes. This can occur when processes die, new process instances are added, or old instances come back to life after failure.
+ * <p>
+ * There are many uses for this functionality. One common use is saving offsets in a custom store. By saving offsets in
+ * the {@link #onPartitionsRevoked(Consumer, Collection)} call, we can ensure that the offsets are saved any time the partition
+ * assignment changes.
+ * <p>
+ * Another use is flushing out any kind of cache of intermediate results the consumer may be keeping. For example,
+ * consider a case where the consumer is subscribed to a topic containing user page views, and the goal is to count the
+ * number of page views per user for each five-minute window. Let's say the topic is partitioned by the user id so that
+ * all events for a particular user go to a single consumer instance. The consumer can keep in memory a running
+ * tally of actions per user and only flush these out to a remote data store when its cache gets too big. However, if a
+ * partition is reassigned it may want to automatically trigger a flush of this cache before the new owner takes over
+ * consumption.
+ * <p>
+ * This callback will execute in the user thread as part of the {@link Consumer#poll(long) poll(long)} call whenever partition assignment changes.
+ * <p>
+ * It is guaranteed that all consumer processes will invoke {@link #onPartitionsRevoked(Consumer, Collection) onPartitionsRevoked} prior to 
+ * any process invoking {@link #onPartitionsAssigned(Consumer, Collection) onPartitionsAssigned}. So if offsets or other state is saved in the 
+ * {@link #onPartitionsRevoked(Consumer, Collection) onPartitionsRevoked} call, it is guaranteed to be saved by the time the process taking over that
+ * partition has its {@link #onPartitionsAssigned(Consumer, Collection) onPartitionsAssigned} callback called to load the state.
+ * <p>
+ * Here is pseudo-code for a callback implementation for saving offsets:
+ * <pre>
+ * {@code
+ *   public class SaveOffsetsOnRebalance implements ConsumerRebalanceCallback {
+ *       public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
+ *           // read the offsets from an external store using some custom code not described here
+ *           for(TopicPartition partition: partitions)
+ *              consumer.seek(partition, readOffsetFromExternalStore(partition));
+ *       }      
+ *       public void onPartitionsRevoked(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
+ *           // save the offsets in an external store using some custom code not described here
+ *           for(TopicPartition partition: partitions)
+ *              saveOffsetInExternalStore(consumer.position(partition));
+ *       }
+ *   }
+ * }
+ * </pre>
  */
 public interface ConsumerRebalanceCallback {
 
     /**
-     * A callback method the user can implement to provide handling of customized offsets on completion of a successful 
-     * rebalance operation. This method will be called after a rebalance operation completes and before the consumer 
-     * starts fetching data.
-     * <p> 
-     * For examples on usage of this API, see Usage Examples section of {@link KafkaConsumer KafkaConsumer}  
-     * @param partitions The list of partitions that are assigned to the consumer after rebalance
+     * A callback method the user can implement to provide handling of customized offsets on completion of a successful
+     * partition re-assignment. This method will be called after the partition re-assignment completes and before the
+     * consumer starts fetching data.
+     * <p>
+     * It is guaranteed that all the processes in a consumer group will execute their
+     * {@link #onPartitionsRevoked(Consumer, Collection)} callback before any instance executes its
+     * {@link #onPartitionsAssigned(Consumer, Collection)} callback.
+     * 
+     * @param partitions The list of partitions that are now assigned to the consumer (may include partitions previously
+     *            assigned to the consumer)
      */
-    public void onPartitionsAssigned(Consumer<?,?> consumer, Collection<TopicPartition> partitions);
-    
+    public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions);
+
     /**
-     * A callback method the user can implement to provide handling of offset commits to a customized store on the 
-     * start of a rebalance operation. This method will be called before a rebalance operation starts and after the 
-     * consumer stops fetching data. It is recommended that offsets should be committed in this callback to 
-     * either Kafka or a custom offset store to prevent duplicate data 
-     * <p> 
-     * For examples on usage of this API, see Usage Examples section of {@link KafkaConsumer KafkaConsumer}  
+     * A callback method the user can implement to provide handling of offset commits to a customized store on the start
+     * of a rebalance operation. This method will be called before a rebalance operation starts and after the consumer
+     * stops fetching data. It is recommended that offsets should be committed in this callback to either Kafka or a
+     * custom offset store to prevent duplicate data.
+     * <p>
+     * For examples on usage of this API, see Usage Examples section of {@link KafkaConsumer KafkaConsumer}
+     * 
      * @param partitions The list of partitions that were assigned to the consumer on the last rebalance
      */
-    public void onPartitionsRevoked(Consumer<?,?> consumer, Collection<TopicPartition> partitions);
+    public void onPartitionsRevoked(Consumer<?, ?> consumer, Collection<TopicPartition> partitions);
 }

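To complement the javadoc pseudo-code above, a compilable sketch (not part of this patch) of a trivial callback that just logs assignment changes; the class name and log messages are illustrative, and it would be registered through the rebalance.callback.class config added earlier in this patch.

    import java.util.Collection;

    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.clients.consumer.ConsumerRebalanceCallback;
    import org.apache.kafka.common.TopicPartition;

    public class LoggingRebalanceCallback implements ConsumerRebalanceCallback {
        @Override
        public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
            // runs after the re-assignment completes and before fetching resumes
            for (TopicPartition partition : partitions)
                System.out.println("Assigned: " + partition);
        }

        @Override
        public void onPartitionsRevoked(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
            // runs before the rebalance starts; a real implementation would commit
            // or save offsets here to prevent duplicates
            for (TopicPartition partition : partitions)
                System.out.println("Revoked: " + partition + " at position " + consumer.position(partition));
        }
    }
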
http://git-wip-us.apache.org/repos/asf/kafka/blob/0699ff2c/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java
index 16af70a..466254e 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java
@@ -9,119 +9,76 @@
  * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
  * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
  * specific language governing permissions and limitations under the License.
-*/
+ */
 package org.apache.kafka.clients.consumer;
 
-import org.apache.kafka.common.TopicPartition;
-
 /**
- * A key/value pair to be received from Kafka. This consists of a topic name and a partition number, from which the 
- * record is being received and an offset that points to the record in a Kafka partition. 
+ * A key/value pair to be received from Kafka. This consists of a topic name and a partition number from which the
+ * record is being received, and an offset that points to the record in a Kafka partition.
  */
-public final class ConsumerRecord<K,V> {
-    private final TopicPartition partition; 
+public final class ConsumerRecord<K, V> {
+    private final String topic;
+    private final int partition;
+    private final long offset;
     private final K key;
     private final V value;
-    private final long offset;
-    private volatile Exception error;
-    
-    /**
-     * Creates a record to be received from a specified topic and partition
-     * 
-     * @param topic     The topic this record is received from
-     * @param partitionId The partition of the topic this record is received from
-     * @param key       The key of the record, if one exists
-     * @param value     The record contents
-     * @param offset    The offset of this record in the corresponding Kafka partition
-     */
-    public ConsumerRecord(String topic, int partitionId, K key, V value, long offset) {
-        this(topic, partitionId, key, value, offset, null);
-    }
 
     /**
      * Creates a record to be received from a specified topic and partition
      * 
      * @param topic The topic this record is received from
-     * @param partitionId The partition of the topic this record is received from
-     * @param value The record contents
+     * @param partition The partition of the topic this record is received from
      * @param offset The offset of this record in the corresponding Kafka partition
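+     * @param key The key of the record, if one exists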
+     * @param value The record contents
      */
-    public ConsumerRecord(String topic, int partitionId, V value, long offset) {
-        this(topic, partitionId, null, value, offset);
-    }
-
-    /**
-     * Creates a record with an error code
-     * @param topic     The topic this record is received from
-     * @param partitionId The partition of the topic this record is received from
-     * @param error     The exception corresponding to the error code returned by the server for this topic partition
-     */
-    public ConsumerRecord(String topic, int partitionId, Exception error) {
-        this(topic, partitionId, null, null, -1L, error);
-    }
-    
-    private ConsumerRecord(String topic, int partitionId, K key, V value, long offset, Exception error) {
+    public ConsumerRecord(String topic, int partition, long offset, K key, V value) {
         if (topic == null)
             throw new IllegalArgumentException("Topic cannot be null");
-        this.partition = new TopicPartition(topic, partitionId);
+        this.topic = topic;
+        this.partition = partition;
+        this.offset = offset;
         this.key = key;
         this.value = value;
-        this.offset = offset;  
-        this.error = error;
     }
-    
+
     /**
      * The topic this record is received from
      */
     public String topic() {
-        return partition.topic();
+        return this.topic;
     }
 
     /**
-     * The partition from which this record is received 
+     * The partition from which this record is received
      */
     public int partition() {
-        return partition.partition();
+        return this.partition;
     }
-    
-    /**
-     * The TopicPartition object containing the topic and partition
-     */
-    public TopicPartition topicAndPartition() {
-        return partition;
-    }
-    
+
     /**
      * The key (or null if no key is specified)
-     * @throws Exception The exception thrown while fetching this record.
      */
-    public K key() throws Exception {
+    public K key() {
-        if (this.error != null)
-            throw this.error;
         return key;
     }
 
     /**
      * The value
-     * @throws Exception The exception thrown while fetching this record.
      */
-    public V value() throws Exception {
+    public V value() {
-        if (this.error != null)
-            throw this.error;
         return value;
     }
 
     /**
      * The position of this record in the corresponding Kafka partition.
-     * @throws Exception The exception thrown while fetching this record.
      */
-    public long offset() throws Exception {
-        if (this.error != null)
-            throw this.error;
+    public long offset() {
         return offset;
     }
 
-    public Exception error() {
-        return this.error;
+    @Override
+    public String toString() {
+        return "ConsumerRecord(topic = " + topic() + ", partition = " + partition() + ", offset = " + offset()
+                + ", key = " + key + ", value = " + value + ")";
     }
 }
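
To make the new field layout concrete, a small sketch (not part of this patch); the topic, partition, offset, key, and value below are made-up values.

    import org.apache.kafka.clients.consumer.ConsumerRecord;

    public class ConsumerRecordExample {
        public static void main(String[] args) throws Exception {
            // constructor order after this patch: topic, partition, offset, key, value
            ConsumerRecord<String, String> record =
                    new ConsumerRecord<String, String>("page-views", 0, 42L, "user-1", "/index.html");
            System.out.println(record.topic() + "-" + record.partition() + "@" + record.offset());
            System.out.println(record.key() + " => " + record.value()); // user-1 => /index.html
            System.out.println(record);                                 // uses the new toString()
        }
    }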