Posted to commits@kafka.apache.org by ju...@apache.org on 2015/11/10 02:09:42 UTC

[2/3] kafka-site git commit: add 0.9.0 docs

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/connect_config.html
----------------------------------------------------------------------
diff --git a/090/connect_config.html b/090/connect_config.html
new file mode 100644
index 0000000..c683d54
--- /dev/null
+++ b/090/connect_config.html
@@ -0,0 +1,112 @@
+<table>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Default</th>
+<th>Valid Values</th>
+<th>Importance</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>group.id</td><td>string</td><td></td><td></td><td>high</td><td>A unique string that identifies the Connect cluster group this worker belongs to.</td></tr>
+<tr>
+<td>internal.key.converter</td><td>class</td><td></td><td></td><td>high</td><td>Converter class for internal key Connect data that implements the <code>Converter</code> interface. Used for converting data like offsets and configs.</td></tr>
+<tr>
+<td>internal.value.converter</td><td>class</td><td></td><td></td><td>high</td><td>Converter class for internal value Connect data that implements the <code>Converter</code> interface. Used for converting data like offsets and configs.</td></tr>
+<tr>
+<td>key.converter</td><td>class</td><td></td><td></td><td>high</td><td>Converter class for key Connect data that implements the <code>Converter</code> interface.</td></tr>
+<tr>
+<td>value.converter</td><td>class</td><td></td><td></td><td>high</td><td>Converter class for value Connect data that implements the <code>Converter</code> interface.</td></tr>
+<tr>
+<td>bootstrap.servers</td><td>list</td><td>[localhost:9092]</td><td></td><td>high</td><td>A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form <code>host1:port1,host2:port2,...</code>. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).</td></tr>
+<tr>
+<td>cluster</td><td>string</td><td>connect</td><td></td><td>high</td><td>ID for this cluster, which is used to provide a namespace so multiple Kafka Connect clusters or instances may co-exist while sharing a single Kafka cluster.</td></tr>
+<tr>
+<td>heartbeat.interval.ms</td><td>int</td><td>3000</td><td></td><td>high</td><td>The expected time between heartbeats to the group coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the worker's session stays active and to facilitate rebalancing when new members join or leave the group. The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.</td></tr>
+<tr>
+<td>session.timeout.ms</td><td>int</td><td>30000</td><td></td><td>high</td><td>The timeout used to detect failures when using Kafka's group management facilities.</td></tr>
+<tr>
+<td>ssl.key.password</td><td>string</td><td>null</td><td></td><td>high</td><td>The password of the private key in the key store file. This is optional for the client.</td></tr>
+<tr>
+<td>ssl.keystore.location</td><td>string</td><td>null</td><td></td><td>high</td><td>The location of the key store file. This is optional for the client and can be used for two-way client authentication.</td></tr>
+<tr>
+<td>ssl.keystore.password</td><td>string</td><td>null</td><td></td><td>high</td><td>The store password for the key store file. This is optional for the client and only needed if ssl.keystore.location is configured.</td></tr>
+<tr>
+<td>ssl.truststore.location</td><td>string</td><td>null</td><td></td><td>high</td><td>The location of the trust store file. </td></tr>
+<tr>
+<td>ssl.truststore.password</td><td>string</td><td>null</td><td></td><td>high</td><td>The password for the trust store file. </td></tr>
+<tr>
+<td>connections.max.idle.ms</td><td>long</td><td>540000</td><td></td><td>medium</td><td>Close idle connections after the number of milliseconds specified by this config.</td></tr>
+<tr>
+<td>receive.buffer.bytes</td><td>int</td><td>32768</td><td>[0,...]</td><td>medium</td><td>The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.</td></tr>
+<tr>
+<td>request.timeout.ms</td><td>int</td><td>40000</td><td>[0,...]</td><td>medium</td><td>The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.</td></tr>
+<tr>
+<td>sasl.kerberos.principal.to.local.rules</td><td>list</td><td>[DEFAULT]</td><td></td><td>medium</td><td>A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form &lt;username&gt;/&lt;hostname&gt;@&lt;REALM&gt; are mapped to &lt;username&gt;.</td></tr>
+<tr>
+<td>sasl.kerberos.service.name</td><td>string</td><td>null</td><td></td><td>medium</td><td>The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.</td></tr>
+<tr>
+<td>security.protocol</td><td>string</td><td>PLAINTEXT</td><td></td><td>medium</td><td>Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported.</td></tr>
+<tr>
+<td>send.buffer.bytes</td><td>int</td><td>131072</td><td>[0,...]</td><td>medium</td><td>The size of the TCP send buffer (SO_SNDBUF) to use when sending data.</td></tr>
+<tr>
+<td>ssl.enabled.protocols</td><td>list</td><td>[TLSv1.2, TLSv1.1, TLSv1]</td><td></td><td>medium</td><td>The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.</td></tr>
+<tr>
+<td>ssl.keystore.type</td><td>string</td><td>JKS</td><td></td><td>medium</td><td>The file format of the key store file. This is optional for the client. Default value is JKS.</td></tr>
+<tr>
+<td>ssl.protocol</td><td>string</td><td>TLS</td><td></td><td>medium</td><td>The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.</td></tr>
+<tr>
+<td>ssl.provider</td><td>string</td><td>null</td><td></td><td>medium</td><td>The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.</td></tr>
+<tr>
+<td>ssl.truststore.type</td><td>string</td><td>JKS</td><td></td><td>medium</td><td>The file format of the trust store file. Default value is JKS.</td></tr>
+<tr>
+<td>worker.sync.timeout.ms</td><td>int</td><td>3000</td><td></td><td>medium</td><td>When the worker is out of sync with other workers and needs to resynchronize configurations, wait up to this amount of time before giving up, leaving the group, and waiting a backoff period before rejoining.</td></tr>
+<tr>
+<td>worker.unsync.backoff.ms</td><td>int</td><td>300000</td><td></td><td>medium</td><td>When the worker is out of sync with other workers and  fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining.</td></tr>
+<tr>
+<td>client.id</td><td>string</td><td>""</td><td></td><td>low</td><td>An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.</td></tr>
+<tr>
+<td>metadata.max.age.ms</td><td>long</td><td>300000</td><td>[0,...]</td><td>low</td><td>The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.</td></tr>
+<tr>
+<td>metric.reporters</td><td>list</td><td>[]</td><td></td><td>low</td><td>A list of classes to use as metrics reporters. Implementing the <code>MetricReporter</code> interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.</td></tr>
+<tr>
+<td>metrics.num.samples</td><td>int</td><td>2</td><td>[1,...]</td><td>low</td><td>The number of samples maintained to compute metrics.</td></tr>
+<tr>
+<td>metrics.sample.window.ms</td><td>long</td><td>30000</td><td>[0,...]</td><td>low</td><td>The window of time over which each metrics sample is computed.</td></tr>
+<tr>
+<td>offset.flush.interval.ms</td><td>long</td><td>60000</td><td></td><td>low</td><td>Interval at which to try committing offsets for tasks.</td></tr>
+<tr>
+<td>offset.flush.timeout.ms</td><td>long</td><td>5000</td><td></td><td>low</td><td>Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt.</td></tr>
+<tr>
+<td>principal.builder.class</td><td>class</td><td>class org.apache.kafka.common.security.auth.DefaultPrincipalBuilder</td><td></td><td>low</td><td>The principal builder to generate a Java Principal. This config is optional for the client.</td></tr>
+<tr>
+<td>reconnect.backoff.ms</td><td>long</td><td>50</td><td>[0,...]</td><td>low</td><td>The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.</td></tr>
+<tr>
+<td>rest.advertised.host.name</td><td>string</td><td>null</td><td></td><td>low</td><td>If this is set, this is the hostname that will be given out to other workers to connect to.</td></tr>
+<tr>
+<td>rest.advertised.port</td><td>int</td><td>null</td><td></td><td>low</td><td>If this is set, this is the port that will be given out to other workers to connect to.</td></tr>
+<tr>
+<td>rest.host.name</td><td>string</td><td>null</td><td></td><td>low</td><td>Hostname for the REST API. If this is set, it will only bind to this interface.</td></tr>
+<tr>
+<td>rest.port</td><td>int</td><td>8083</td><td></td><td>low</td><td>Port for the REST API to listen on.</td></tr>
+<tr>
+<td>retry.backoff.ms</td><td>long</td><td>100</td><td>[0,...]</td><td>low</td><td>The amount of time to wait before attempting to retry a failed fetch request to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.</td></tr>
+<tr>
+<td>sasl.kerberos.kinit.cmd</td><td>string</td><td>/usr/bin/kinit</td><td></td><td>low</td><td>Kerberos kinit command path. Default is /usr/bin/kinit</td></tr>
+<tr>
+<td>sasl.kerberos.min.time.before.relogin</td><td>long</td><td>60000</td><td></td><td>low</td><td>Login thread sleep time between refresh attempts.</td></tr>
+<tr>
+<td>sasl.kerberos.ticket.renew.jitter</td><td>double</td><td>0.05</td><td></td><td>low</td><td>Percentage of random jitter added to the renewal time.</td></tr>
+<tr>
+<td>sasl.kerberos.ticket.renew.window.factor</td><td>double</td><td>0.8</td><td></td><td>low</td><td>Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.</td></tr>
+<tr>
+<td>ssl.cipher.suites</td><td>list</td><td>null</td><td></td><td>low</td><td>A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported.</td></tr>
+<tr>
+<td>ssl.endpoint.identification.algorithm</td><td>string</td><td>null</td><td></td><td>low</td><td>The endpoint identification algorithm to validate server hostname using server certificate. </td></tr>
+<tr>
+<td>ssl.keymanager.algorithm</td><td>string</td><td>SunX509</td><td></td><td>low</td><td>The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.</td></tr>
+<tr>
+<td>ssl.trustmanager.algorithm</td><td>string</td><td>PKIX</td><td></td><td>low</td><td>The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.</td></tr>
+<tr>
+<td>task.shutdown.graceful.timeout.ms</td><td>long</td><td>5000</td><td></td><td>low</td><td>Amount of time to wait for tasks to shut down gracefully. This is the total amount of time, not per task. All tasks have shutdown triggered, then they are waited on sequentially.</td></tr>
+</table>
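
As a rough illustration of how a few of the settings in this table fit together, here is a minimal Java sketch that assembles a distributed-worker configuration and writes it out as a properties file. The broker addresses, group name, and flush interval are illustrative assumptions rather than values taken from this commit, and the JsonConverter is used only as an example converter implementation.

    import java.io.FileWriter;
    import java.io.IOException;
    import java.util.Properties;

    // Assemble an example Kafka Connect worker configuration using config
    // names from the table above; all values here are placeholders.
    public class WriteWorkerConfig {
        public static void main(String[] args) throws IOException {
            Properties p = new Properties();
            p.setProperty("bootstrap.servers", "broker1:9092,broker2:9092"); // hypothetical brokers
            p.setProperty("group.id", "connect-cluster");                    // Connect group this worker joins
            p.setProperty("key.converter", "org.apache.kafka.connect.json.JsonConverter");
            p.setProperty("value.converter", "org.apache.kafka.connect.json.JsonConverter");
            p.setProperty("internal.key.converter", "org.apache.kafka.connect.json.JsonConverter");
            p.setProperty("internal.value.converter", "org.apache.kafka.connect.json.JsonConverter");
            p.setProperty("offset.flush.interval.ms", "60000");              // matches the table default
            p.setProperty("rest.port", "8083");                              // REST API listen port
            try (FileWriter out = new FileWriter("connect-distributed.properties")) {
                p.store(out, "example Kafka Connect worker config");
            }
        }
    }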

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/consumer_config.html
----------------------------------------------------------------------
diff --git a/090/consumer_config.html b/090/consumer_config.html
new file mode 100644
index 0000000..7ad5a32
--- /dev/null
+++ b/090/consumer_config.html
@@ -0,0 +1,102 @@
+<table>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Default</th>
+<th>Valid Values</th>
+<th>Importance</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>bootstrap.servers</td><td>list</td><td></td><td></td><td>high</td><td>A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form <code>host1:port1,host2:port2,...</code>. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).</td></tr>
+<tr>
+<td>key.deserializer</td><td>class</td><td></td><td></td><td>high</td><td>Deserializer class for key that implements the <code>Deserializer</code> interface.</td></tr>
+<tr>
+<td>value.deserializer</td><td>class</td><td></td><td></td><td>high</td><td>Deserializer class for value that implements the <code>Deserializer</code> interface.</td></tr>
+<tr>
+<td>fetch.min.bytes</td><td>int</td><td>1024</td><td>[0,...]</td><td>high</td><td>The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. A setting of 1 means that fetch requests are answered as soon as a single byte of data is available or the fetch request times out waiting for data to arrive. Setting this to something greater than 1 will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency.</td></tr>
+<tr>
+<td>group.id</td><td>string</td><td>""</td><td></td><td>high</td><td>A unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality by using <code>subscribe(topic)</code> or the Kafka-based offset management strategy.</td></tr>
+<tr>
+<td>heartbeat.interval.ms</td><td>int</td><td>3000</td><td></td><td>high</td><td>The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.</td></tr>
+<tr>
+<td>max.partition.fetch.bytes</td><td>int</td><td>1048576</td><td>[0,...]</td><td>high</td><td>The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be <code>#partitions * max.partition.fetch.bytes</code>. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition.</td></tr>
+<tr>
+<td>session.timeout.ms</td><td>int</td><td>30000</td><td></td><td>high</td><td>The timeout used to detect failures when using Kafka's group management facilities.</td></tr>
+<tr>
+<td>ssl.key.password</td><td>string</td><td>null</td><td></td><td>high</td><td>The password of the private key in the key store file. This is optional for the client.</td></tr>
+<tr>
+<td>ssl.keystore.location</td><td>string</td><td>null</td><td></td><td>high</td><td>The location of the key store file. This is optional for the client and can be used for two-way client authentication.</td></tr>
+<tr>
+<td>ssl.keystore.password</td><td>string</td><td>null</td><td></td><td>high</td><td>The store password for the key store file. This is optional for the client and only needed if ssl.keystore.location is configured.</td></tr>
+<tr>
+<td>ssl.truststore.location</td><td>string</td><td>null</td><td></td><td>high</td><td>The location of the trust store file. </td></tr>
+<tr>
+<td>ssl.truststore.password</td><td>string</td><td>null</td><td></td><td>high</td><td>The password for the trust store file. </td></tr>
+<tr>
+<td>auto.offset.reset</td><td>string</td><td>latest</td><td>[latest, earliest, none]</td><td>medium</td><td>What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. because that data has been deleted): <ul><li>earliest: automatically reset the offset to the earliest offset</li><li>latest: automatically reset the offset to the latest offset</li><li>none: throw exception to the consumer if no previous offset is found for the consumer's group</li><li>anything else: throw exception to the consumer.</li></ul></td></tr>
+<tr>
+<td>connections.max.idle.ms</td><td>long</td><td>540000</td><td></td><td>medium</td><td>Close idle connections after the number of milliseconds specified by this config.</td></tr>
+<tr>
+<td>enable.auto.commit</td><td>boolean</td><td>true</td><td></td><td>medium</td><td>If true the consumer's offset will be periodically committed in the background.</td></tr>
+<tr>
+<td>partition.assignment.strategy</td><td>list</td><td>[org.apache.kafka.clients.consumer.RangeAssignor]</td><td></td><td>medium</td><td>The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used</td></tr>
+<tr>
+<td>receive.buffer.bytes</td><td>int</td><td>32768</td><td>[0,...]</td><td>medium</td><td>The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.</td></tr>
+<tr>
+<td>request.timeout.ms</td><td>int</td><td>40000</td><td>[0,...]</td><td>medium</td><td>The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.</td></tr>
+<tr>
+<td>sasl.kerberos.service.name</td><td>string</td><td>null</td><td></td><td>medium</td><td>The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.</td></tr>
+<tr>
+<td>security.protocol</td><td>string</td><td>PLAINTEXT</td><td></td><td>medium</td><td>Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported.</td></tr>
+<tr>
+<td>send.buffer.bytes</td><td>int</td><td>131072</td><td>[0,...]</td><td>medium</td><td>The size of the TCP send buffer (SO_SNDBUF) to use when sending data.</td></tr>
+<tr>
+<td>ssl.enabled.protocols</td><td>list</td><td>[TLSv1.2, TLSv1.1, TLSv1]</td><td></td><td>medium</td><td>The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.</td></tr>
+<tr>
+<td>ssl.keystore.type</td><td>string</td><td>JKS</td><td></td><td>medium</td><td>The file format of the key store file. This is optional for the client. Default value is JKS.</td></tr>
+<tr>
+<td>ssl.protocol</td><td>string</td><td>TLS</td><td></td><td>medium</td><td>The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.</td></tr>
+<tr>
+<td>ssl.provider</td><td>string</td><td>null</td><td></td><td>medium</td><td>The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.</td></tr>
+<tr>
+<td>ssl.truststore.type</td><td>string</td><td>JKS</td><td></td><td>medium</td><td>The file format of the trust store file. Default value is JKS.</td></tr>
+<tr>
+<td>auto.commit.interval.ms</td><td>long</td><td>5000</td><td>[0,...]</td><td>low</td><td>The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if <code>enable.auto.commit</code> is set to <code>true</code>.</td></tr>
+<tr>
+<td>check.crcs</td><td>boolean</td><td>true</td><td></td><td>low</td><td>Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.</td></tr>
+<tr>
+<td>client.id</td><td>string</td><td>""</td><td></td><td>low</td><td>An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.</td></tr>
+<tr>
+<td>fetch.max.wait.ms</td><td>int</td><td>500</td><td>[0,...]</td><td>low</td><td>The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes.</td></tr>
+<tr>
+<td>metadata.max.age.ms</td><td>long</td><td>300000</td><td>[0,...]</td><td>low</td><td>The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.</td></tr>
+<tr>
+<td>metric.reporters</td><td>list</td><td>[]</td><td></td><td>low</td><td>A list of classes to use as metrics reporters. Implementing the <code>MetricReporter</code> interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.</td></tr>
+<tr>
+<td>metrics.num.samples</td><td>int</td><td>2</td><td>[1,...]</td><td>low</td><td>The number of samples maintained to compute metrics.</td></tr>
+<tr>
+<td>metrics.sample.window.ms</td><td>long</td><td>30000</td><td>[0,...]</td><td>low</td><td>The window of time over which each metrics sample is computed.</td></tr>
+<tr>
+<td>principal.builder.class</td><td>class</td><td>class org.apache.kafka.common.security.auth.DefaultPrincipalBuilder</td><td></td><td>low</td><td>The principal builder to generate a Java Principal. This config is optional for the client.</td></tr>
+<tr>
+<td>reconnect.backoff.ms</td><td>long</td><td>50</td><td>[0,...]</td><td>low</td><td>The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.</td></tr>
+<tr>
+<td>retry.backoff.ms</td><td>long</td><td>100</td><td>[0,...]</td><td>low</td><td>The amount of time to wait before attempting to retry a failed fetch request to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.</td></tr>
+<tr>
+<td>sasl.kerberos.kinit.cmd</td><td>string</td><td>/usr/bin/kinit</td><td></td><td>low</td><td>Kerberos kinit command path. Default is /usr/bin/kinit</td></tr>
+<tr>
+<td>sasl.kerberos.min.time.before.relogin</td><td>long</td><td>60000</td><td></td><td>low</td><td>Login thread sleep time between refresh attempts.</td></tr>
+<tr>
+<td>sasl.kerberos.ticket.renew.jitter</td><td>double</td><td>0.05</td><td></td><td>low</td><td>Percentage of random jitter added to the renewal time.</td></tr>
+<tr>
+<td>sasl.kerberos.ticket.renew.window.factor</td><td>double</td><td>0.8</td><td></td><td>low</td><td>Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.</td></tr>
+<tr>
+<td>ssl.cipher.suites</td><td>list</td><td>null</td><td></td><td>low</td><td>A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported.</td></tr>
+<tr>
+<td>ssl.endpoint.identification.algorithm</td><td>string</td><td>null</td><td></td><td>low</td><td>The endpoint identification algorithm to validate server hostname using server certificate. </td></tr>
+<tr>
+<td>ssl.keymanager.algorithm</td><td>string</td><td>SunX509</td><td></td><td>low</td><td>The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.</td></tr>
+<tr>
+<td>ssl.trustmanager.algorithm</td><td>string</td><td>PKIX</td><td></td><td>low</td><td>The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.</td></tr>
+</table>
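
For context on how these settings are typically supplied, the following is a minimal sketch of the new 0.9.0 Java consumer configured with a handful of the options above. The broker address, group id, and topic name are placeholders.

    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class ConsumeExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:9092");    // placeholder broker
            props.put("group.id", "test-group");               // consumer group id
            props.put("enable.auto.commit", "true");           // commit offsets in the background
            props.put("auto.offset.reset", "earliest");        // start from the beginning if no offset exists
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Arrays.asList("my-topic")); // placeholder topic
                ConsumerRecords<String, String> records = consumer.poll(1000); // wait up to 1s for data
                for (ConsumerRecord<String, String> record : records)
                    System.out.printf("offset=%d key=%s value=%s%n",
                                      record.offset(), record.key(), record.value());
            }
        }
    }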

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/design.html
----------------------------------------------------------------------
diff --git a/090/design.html b/090/design.html
index fbb1d5f..347f602 100644
--- a/090/design.html
+++ b/090/design.html
@@ -1,3 +1,20 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+ 
+    http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <h3><a id="majordesignelements">4.1 Motivation</a></h3>
 <p>
 We designed Kafka to be able to act as a unified platform for handling all the real-time data feeds <a href="#introduction">a large company might have</a>. To do this we had to think through a fairly broad set of use cases.
@@ -10,7 +27,7 @@ It also meant the system would have to handle low-latency delivery to handle mor
 <p>
 We wanted to support partitioned, distributed, real-time processing of these feeds to create new, derived feeds. This motivated our partitioning and consumer model.
 <p>
-Finally in cases where the stream is fed into other data systems for serving we new the system would have to be able to guarantee fault-tolerance in the presence of machine failures.
+Finally in cases where the stream is fed into other data systems for serving we knew the system would have to be able to guarantee fault-tolerance in the presence of machine failures.
 <p>
 Supporting these uses led us to a design with a number of unique elements, more akin to a database log than a traditional messaging system. We will outline some elements of the design in the following sections.
 
@@ -289,7 +306,7 @@ This functionality is inspired by one of LinkedIn's oldest and most successful p
 
 Here is a high-level picture that shows the logical structure of a Kafka log with the offset for each message.
 <p>
-<img src="/images/log_cleaner_anatomy.png">
+<img src="images/log_cleaner_anatomy.png">
 <p>
 The head of the log is identical to a traditional Kafka log. It has dense, sequential offsets and retains all messages. Log compaction adds an option for handling the tail of the log. The picture above shows a log with a compacted tail. Note that the messages in the tail of the log retain the original offset assigned when they were first written&mdash;that never changes. Note also that all offsets remain valid positions in the log, even if the message with that offset has been compacted away; in this case this position is indistinguishable from the next highest offset that does appear in the log. For example, in the picture above the offsets 36, 37, and 38 are all equivalent positions and a read beginning at any of these offsets would return a message set beginning with 38.
 <p>
@@ -297,7 +314,7 @@ Compaction also allows for deletes. A message with a key and a null payload will
 <p>
 The compaction is done in the background by periodically recopying log segments. Cleaning does not block reads and can be throttled to use no more than a configurable amount of I/O throughput to avoid impacting producers and consumers. The actual process of compacting a log segment looks something like this:
 <p>
-<img src="/images/log_compaction.png">
+<img src="images/log_compaction.png">
 <p>
 <h4>What guarantees does log compaction provide?</h4>
 
@@ -336,3 +353,28 @@ Further cleaner configurations are described <a href="/documentation.html#broker
   <li>You cannot configure yet how much log is retained without compaction (the "head" of the log).  Currently all segments are eligible except for the last segment, i.e. the one currently being written to.</li>
   <li>Log compaction is not yet compatible with compressed topics.</li>
 </ol>
+<h3><a id="semantics">4.9 Quotas</a></h3>
+<p>
+    Starting in 0.9, the Kafka cluster has the ability to enforce quotas on produce and fetch requests. Quotas are basically byte-rate thresholds defined per client-id. A client-id logically identifies an application making a request. Hence a single client-id can span multiple producer and consumer instances and the quota will apply for all of them as a single entity, i.e. if client-id="test-client" has a produce quota of 10MB/sec, this is shared across all instances with that same id.
+
+<h4>Why are quotas necessary?</h4>
+<p>
+It is possible for producers and consumers to produce/consume very high volumes of data and thus monopolize broker resources, cause network saturation and generally DOS other clients and the brokers themselves. Having quotas protects against these issues and is all the more important in large multi-tenant clusters where a small set of badly behaved clients can degrade user experience for the well behaved ones. In fact, when running Kafka as a service this even makes it possible to enforce API limits according to an agreed upon contract.
+</p>
+<h4>Enforcement</h4>
+<p>
+    By default, each unique client-id receives a fixed quota in bytes/sec as configured by the cluster (quota.producer.default, quota.consumer.default).
+    This quota is defined on a per-broker basis. Each client can publish/fetch a maximum of X bytes/sec per broker before it gets throttled. We decided that defining these quotas per broker is much better than having a fixed cluster wide bandwidth per client because that would require a mechanism to share client quota usage among all the brokers. This can be harder to get right than the quota implementation itself!
+</p>
+<p>
+    How does a broker react when it detects a quota violation? In our solution, the broker does not return an error; rather, it attempts to slow down a client exceeding its quota. It computes the amount of delay needed to bring a guilty client under its quota and delays the response for that time. This approach keeps the quota violation transparent to clients (outside of client side metrics). This also keeps them from having to implement any special backoff and retry behavior which can get tricky. In fact, bad client behavior (retry without backoff) can exacerbate the very problem quotas are trying to solve.
+</p>
+<p>
+Client byte rate is measured over multiple small windows (e.g. 30 windows of 1 second each) in order to detect and correct quota violations quickly. Typically, having large measurement windows (e.g. 10 windows of 30 seconds each) leads to large bursts of traffic followed by long delays, which is not great in terms of user experience.
+</p>
+<h4>Quota overrides</h4>
+<p>
+    It is possible to override the default quota for client-ids that need a higher (or even lower) quota. The mechanism is similar to the per-topic log config overrides.
+    Client-id overrides are written to ZooKeeper under <i><b>/config/clients</b></i>. These overrides are read by all brokers and are effective immediately. This lets us change quotas without having to do a rolling restart of the entire cluster. See <a href="/ops.html#quotas">here</a> for details.
+
+</p>
\ No newline at end of file
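
The delay-based throttling described under "Enforcement" can be pictured with a small, purely illustrative calculation: given the bytes observed for a client-id over one measurement window and the configured quota, the broker must hold the response long enough for the effective rate to fall back to the quota. The sketch below is a conceptual illustration of that idea, not the broker's actual implementation.

    // Conceptual sketch of the throttling math described above. Given the
    // bytes a client-id sent during a measurement window and its quota,
    // compute how long the broker would delay the response so the observed
    // rate drops back to the quota. Illustration only.
    public class QuotaDelay {
        static long delayMs(long bytesObserved, long windowMs, long quotaBytesPerSec) {
            // time the transfer *should* have taken at the quota rate
            double allowedMs = 1000.0 * bytesObserved / quotaBytesPerSec;
            return Math.max(0L, Math.round(allowedMs - windowMs));
        }

        public static void main(String[] args) {
            // 15 MB observed in a 1-second window against a 10 MB/sec quota -> ~500 ms delay
            System.out.println(delayMs(15L * 1024 * 1024, 1000, 10L * 1024 * 1024));
        }
    }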

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/documentation.html
----------------------------------------------------------------------
diff --git a/090/documentation.html b/090/documentation.html
index 52199d7..c64e67f 100644
--- a/090/documentation.html
+++ b/090/documentation.html
@@ -1,9 +1,26 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <!--#include virtual="../includes/header.html" -->
 
-<h1>Kafka 0.8.2 Documentation</h1>
-Prior releases: <a href="/07/documentation.html">0.7.x</a>, <a href="/08/documentation.html">0.8.0</a>, <a href="/081/documentation.html">0.8.1.X</a>.
+<h1>Kafka 0.9.0 Documentation</h1>
+Prior releases: <a href="/07/documentation.html">0.7.x</a>, <a href="/08/documentation.html">0.8.0</a>, <a href="/081/documentation.html">0.8.1.X</a>, <a href="/082/documentation.html">0.8.2.X</a>.
 </ul>
-    
+
 <ul class="toc">
     <li><a href="#gettingStarted">1. Getting Started</a>
          <ul>
@@ -13,20 +30,24 @@ Prior releases: <a href="/07/documentation.html">0.7.x</a>, <a href="/08/documen
              <li><a href="#ecosystem">1.4 Ecosystem</a>
              <li><a href="#upgrade">1.5 Upgrading</a>
          </ul>
+    </li>
     <li><a href="#api">2. API</a>
           <ul>
               <li><a href="#producerapi">2.1 Producer API</a>
               <li><a href="#highlevelconsumerapi">2.2 High Level Consumer API</a>
               <li><a href="#simpleconsumerapi">2.3 Simple Consumer API</a>
-              <li><a href="#kafkahadoopconsumerapi">2.4 Kafka Hadoop Consumer API</a>
+              <li><a href="#newconsumerapi">2.4 New Consumer API</a>
           </ul>
+    </li>
     <li><a href="#configuration">3. Configuration</a>
         <ul>
-             <li><a href="#brokerconfigs">3.1 Broker Configs</a>
-             <li><a href="#consumerconfigs">3.2 Consumer Configs</a>
-             <li><a href="#producerconfigs">3.3 Producer Configs</a>
-			 <li><a href="#newproducerconfigs">3.4 New Producer Configs</a>
+            <li><a href="#brokerconfigs">3.1 Broker Configs</a>
+            <li><a href="#producerconfigs">3.2 Producer Configs</a>
+            <li><a href="#consumerconfigs">3.3 Consumer Configs</a>
+            <li><a href="#newconsumerconfigs">3.4 New Consumer Configs</a>
+            <li><a href="#connectconfigs">3.5 Kafka Connect Configs</a>
         </ul>
+    </li>
     <li><a href="#design">4. Design</a>
         <ul>
              <li><a href="#majordesignelements">4.1 Motivation</a>
@@ -38,6 +59,7 @@ Prior releases: <a href="/07/documentation.html">0.7.x</a>, <a href="/08/documen
              <li><a href="#replication">4.7 Replication</a>
              <li><a href="#compaction">4.8 Log Compaction</a>
         </ul>
+    </li>
     <li><a href="#implementation">5. Implementation</a>
         <ul>
               <li><a href="#apidesign">5.1 API Design</a>
@@ -47,6 +69,7 @@ Prior releases: <a href="/07/documentation.html">0.7.x</a>, <a href="/08/documen
               <li><a href="#log">5.5 Log</a>
               <li><a href="#distributionimpl">5.6 Distribution</a>
         </ul>
+    </li>
     <li><a href="#operations">6. Operations</a>
         <ul>
              <li><a href="#basic_ops">6.1 Basic Kafka Operations</a>
@@ -84,6 +107,22 @@ Prior releases: <a href="/07/documentation.html">0.7.x</a>, <a href="/08/documen
                     <li><a href="#zkops">Operationalization</a>
                 </ul>
         </ul>
+    </li>
+    <li><a href="#security">7. Security</a>
+        <ul>
+            <li><a href="#security_overview">7.1 Security Overview</a></li>
+            <li><a href="#security_ssl">7.2 Encryption and Authentication using SSL</a></li>
+            <li><a href="#security_sasl">7.3 Authentication using SASL</a></li>
+            <li><a href="#security_authz">7.4 Authorization and ACLs</a></li>
+        </ul>
+    </li>
+    <li><a href="#connect">8. Kafka Connect</a>
+        <ul>
+            <li><a href="#connect_overview">8.1 Overview</a></li>
+            <li><a href="#connect_user">8.2 User Guide</a></li>
+            <li><a href="#connect_development">8.3 Connector Development Guide</a></li>
+        </ul>
+    </li>
 </ul>
 
 <h2><a id="gettingStarted">1. Getting Started</a></h2>
@@ -113,4 +152,10 @@ Prior releases: <a href="/07/documentation.html">0.7.x</a>, <a href="/08/documen
 
 <!--#include virtual="ops.html" -->
 
+<h2><a id="security">7. Security</a></h2>
+<!--#include virtual="security.html" -->
+
+<h2><a id="connect">8. Kafka Connect</a></h2>
+<!--#include virtual="connect.html" -->
+
 <!--#include virtual="../includes/footer.html" -->

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/ecosystem.html
----------------------------------------------------------------------
diff --git a/090/ecosystem.html b/090/ecosystem.html
index eb41338..e99a446 100644
--- a/090/ecosystem.html
+++ b/090/ecosystem.html
@@ -1,3 +1,20 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+ 
+    http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <h3><a id="ecosystem">1.4 Ecosystem</a></h3>
 
 There are a plethora of tools that integrate with Kafka outside the main distribution. The <a href="https://cwiki.apache.org/confluence/display/KAFKA/Ecosystem"> ecosystem page</a> lists many of these, including stream processing systems, Hadoop integration, monitoring, and deployment tools.

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/consumer-groups.png
----------------------------------------------------------------------
diff --git a/090/images/consumer-groups.png b/090/images/consumer-groups.png
new file mode 100644
index 0000000..16fe293
Binary files /dev/null and b/090/images/consumer-groups.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/kafka_log.png
----------------------------------------------------------------------
diff --git a/090/images/kafka_log.png b/090/images/kafka_log.png
new file mode 100644
index 0000000..75abd96
Binary files /dev/null and b/090/images/kafka_log.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/kafka_multidc.png
----------------------------------------------------------------------
diff --git a/090/images/kafka_multidc.png b/090/images/kafka_multidc.png
new file mode 100644
index 0000000..7bc56f4
Binary files /dev/null and b/090/images/kafka_multidc.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/kafka_multidc_complex.png
----------------------------------------------------------------------
diff --git a/090/images/kafka_multidc_complex.png b/090/images/kafka_multidc_complex.png
new file mode 100644
index 0000000..ab88deb
Binary files /dev/null and b/090/images/kafka_multidc_complex.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/log_anatomy.png
----------------------------------------------------------------------
diff --git a/090/images/log_anatomy.png b/090/images/log_anatomy.png
new file mode 100644
index 0000000..a649499
Binary files /dev/null and b/090/images/log_anatomy.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/log_cleaner_anatomy.png
----------------------------------------------------------------------
diff --git a/090/images/log_cleaner_anatomy.png b/090/images/log_cleaner_anatomy.png
new file mode 100644
index 0000000..fb425b0
Binary files /dev/null and b/090/images/log_cleaner_anatomy.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/log_compaction.png
----------------------------------------------------------------------
diff --git a/090/images/log_compaction.png b/090/images/log_compaction.png
new file mode 100644
index 0000000..4e4a833
Binary files /dev/null and b/090/images/log_compaction.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/mirror-maker.png
----------------------------------------------------------------------
diff --git a/090/images/mirror-maker.png b/090/images/mirror-maker.png
new file mode 100644
index 0000000..8f76b1f
Binary files /dev/null and b/090/images/mirror-maker.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/producer_consumer.png
----------------------------------------------------------------------
diff --git a/090/images/producer_consumer.png b/090/images/producer_consumer.png
new file mode 100644
index 0000000..4b10cc9
Binary files /dev/null and b/090/images/producer_consumer.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/images/tracking_high_level.png
----------------------------------------------------------------------
diff --git a/090/images/tracking_high_level.png b/090/images/tracking_high_level.png
new file mode 100644
index 0000000..b643230
Binary files /dev/null and b/090/images/tracking_high_level.png differ

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/implementation.html
----------------------------------------------------------------------
diff --git a/090/implementation.html b/090/implementation.html
index 3b878af..b95d36f 100644
--- a/090/implementation.html
+++ b/090/implementation.html
@@ -1,12 +1,29 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <h3><a id="apidesign">5.1 API Design</a></h3>
 
 <h4>Producer APIs</h4>
 
 <p>
-The Producer API that wraps the 2 low-level producers - <code>kafka.producer.SyncProducer</code> and <code>kafka.producer.async.AsyncProducer</code>. 
+The Producer API wraps the two low-level producers - <code>kafka.producer.SyncProducer</code> and <code>kafka.producer.async.AsyncProducer</code>.
 <pre>
 class Producer<T> {
-	
+
   /* Sends the data, partitioned by key to the topic using either the */
   /* synchronous or the asynchronous producer */
   public void send(kafka.javaapi.producer.ProducerData&lt;K,V&gt; producerData);
@@ -15,21 +32,21 @@ class Producer<T> {
   /* the synchronous or the asynchronous producer */
   public void send(java.util.List&lt;kafka.javaapi.producer.ProducerData&lt;K,V&gt;&gt; producerData);
 
-  /* Closes the producer and cleans up */	
+  /* Closes the producer and cleans up */
   public void close();
 
 }
 </pre>
 
-The goal is to expose all the producer functionality through a single API to the client.  
+The goal is to expose all the producer functionality through a single API to the client.
 
 The new producer -
 <ul>
-<li>can handle queueing/buffering of multiple producer requests and asynchronous dispatch of the batched data - 	
+<li>can handle queueing/buffering of multiple producer requests and asynchronous dispatch of the batched data -
+<p><code>kafka.producer.Producer</code> provides the ability to batch multiple produce requests (<code>producer.type=async</code>), before serializing and dispatching them to the appropriate kafka broker partition. The size of the batch can be controlled by a few config parameters. As events enter a queue, they are buffered in a queue, until either <code>queue.time</code> or <code>batch.size</code> is reached. A background thread (<code>kafka.producer.async.ProducerSendThread</code>) dequeues the batch of data and lets the <code>kafka.producer.EventHandler</code> serialize and send the data to the appropriate kafka broker partition. A custom event handler can be plugged in through the <code>event.handler</code> config parameter. At various stages of this producer queue pipeline, it is helpful to be able to inject callbacks, either for plugging in custom logging/tracing code or custom monitoring logic. This is possible by implementing the <code>kafka.producer.async.CallbackHandler</code> interface and setting <code>callback.handler</code> config parameter to that class.
 </p>
 </li>
-<li>handles the serialization of data through a user-specified <code>Encoder</code> - 
+<li>handles the serialization of data through a user-specified <code>Encoder</code> -
 <pre>
 interface Encoder&lt;T&gt; {
   public Message toMessage(T data);
@@ -37,15 +54,15 @@ interface Encoder&lt;T&gt; {
 </pre>
 <p>The default is the no-op <code>kafka.serializer.DefaultEncoder</code></p>
 </li>
-<li>provides software load balancing through an optionally user-specified <code>Partitioner</code> - 
+<li>provides software load balancing through an optionally user-specified <code>Partitioner</code> -
 <p>
-The routing decision is influenced by the <code>kafka.producer.Partitioner</code>. 
+The routing decision is influenced by the <code>kafka.producer.Partitioner</code>.
 <pre>
 interface Partitioner&lt;T&gt; {
    int partition(T key, int numPartitions);
 }
 </pre>
-The partition API uses the key and the number of available broker partitions to return a partition id. This id is used as an index into a sorted list of broker_ids and partitions to pick a broker partition for the producer request. The default partitioning strategy is <code>hash(key)%numPartitions</code>. If the key is null, then a random broker partition is picked. A custom partitioning strategy can also be plugged in using the <code>partitioner.class</code> config parameter.	
+The partition API uses the key and the number of available broker partitions to return a partition id. This id is used as an index into a sorted list of broker_ids and partitions to pick a broker partition for the producer request. The default partitioning strategy is <code>hash(key)%numPartitions</code>. If the key is null, then a random broker partition is picked. A custom partitioning strategy can also be plugged in using the <code>partitioner.class</code> config parameter.
 </p>
 </li>
 </ul>
@@ -62,11 +79,11 @@ The high-level API hides the details of brokers from the consumer and allows con
 <h5>Low-level API</h5>
 <pre>
 class SimpleConsumer {
-	
-  /* Send fetch request to a broker and get back a set of messages. */ 
+
+  /* Send fetch request to a broker and get back a set of messages. */
   public ByteBufferMessageSet fetch(FetchRequest request);
 
-  /* Send a list of fetch requests to a broker and get back a response set. */ 
+  /* Send a list of fetch requests to a broker and get back a response set. */
   public MultiFetchResponse multifetch(List&lt;FetchRequest&gt; fetches);
 
   /**
@@ -80,16 +97,16 @@ class SimpleConsumer {
 }
 </pre>
 
-The low-level API is used to implement the high-level API as well as being used directly for some of our offline consumers (such as the hadoop consumer) which have particular requirements around maintaining state.
+The low-level API is used to implement the high-level API as well as being used directly for some of our offline consumers which have particular requirements around maintaining state.
 
 <h5>High-level API</h5>
 <pre>
 
-/* create a connection to the cluster */ 
+/* create a connection to the cluster */
 ConsumerConnector connector = Consumer.create(consumerConfig);
 
 interface ConsumerConnector {
-	
+
   /**
    * This method is used to get a list of KafkaStreams, which are iterators over
    * MessageAndMetadata objects from which you can obtain messages and their
@@ -97,7 +114,7 @@ interface ConsumerConnector {
    *  Input: a map of &lt;topic, #streams&gt;
    *  Output: a map of &lt;topic, list of message streams&gt;
    */
-  public Map&lt;String,List&lt;KafkaStream&gt;&gt; createMessageStreams(Map&lt;String,Int&gt; topicCountMap); 
+  public Map&lt;String,List&lt;KafkaStream&gt;&gt; createMessageStreams(Map&lt;String,Int&gt; topicCountMap);
 
   /**
    * You can also obtain a list of KafkaStreams, that iterate over messages
@@ -109,7 +126,7 @@ interface ConsumerConnector {
 
   /* Commit the offsets of all messages consumed so far. */
   public commitOffsets()
-  
+
   /* Shut down the connector */
   public shutdown()
 }
@@ -132,27 +149,27 @@ Messages consist of a fixed-size header and variable length opaque byte array pa
 <h3><a id="messageformat">5.4 Message Format</a></h3>
 
 <pre>
-	/** 
-	 * A message. The format of an N byte message is the following: 
-	 * 
-	 * If magic byte is 0 
-	 * 
-	 * 1. 1 byte "magic" identifier to allow format changes 
-	 * 
-	 * 2. 4 byte CRC32 of the payload 
-	 * 
-	 * 3. N - 5 byte payload 
-	 * 
-	 * If magic byte is 1 
-	 * 
-	 * 1. 1 byte "magic" identifier to allow format changes 
-	 * 
-	 * 2. 1 byte "attributes" identifier to allow annotations on the message independent of the version (e.g. compression enabled, type of codec used) 
-	 * 
-	 * 3. 4 byte CRC32 of the payload 
-	 * 
-	 * 4. N - 6 byte payload 
-	 * 
+	/**
+	 * A message. The format of an N byte message is the following:
+	 *
+	 * If magic byte is 0
+	 *
+	 * 1. 1 byte "magic" identifier to allow format changes
+	 *
+	 * 2. 4 byte CRC32 of the payload
+	 *
+	 * 3. N - 5 byte payload
+	 *
+	 * If magic byte is 1
+	 *
+	 * 1. 1 byte "magic" identifier to allow format changes
+	 *
+	 * 2. 1 byte "attributes" identifier to allow annotations on the message independent of the version (e.g. compression enabled, type of codec used)
+	 *
+	 * 3. 4 byte CRC32 of the payload
+	 *
+	 * 4. N - 6 byte payload
+	 *
 	 */
 </pre>
 </p>
@@ -166,7 +183,7 @@ The exact binary format for messages is versioned and maintained as a standard i
 <pre>
 On-disk format of a message
 
-message length : 4 bytes (value: 1+4+n) 
+message length : 4 bytes (value: 1+4+n)
 "magic" value  : 1 byte
 crc            : 4 bytes
 payload        : n bytes
@@ -174,7 +191,7 @@ payload        : n bytes
 <p>
 The use of the message offset as the message id is unusual. Our original idea was to use a GUID generated by the producer, and maintain a mapping from GUID to offset on each broker. But since a consumer must maintain an ID for each server, the global uniqueness of the GUID provides no value. Furthermore the complexity of maintaining the mapping from a random id to an offset requires a heavy weight index structure which must be synchronized with disk, essentially requiring a full persistent random-access data structure. Thus to simplify the lookup structure we decided to use a simple per-partition atomic counter which could be coupled with the partition id and node id to uniquely identify a message; this makes the lookup structure simpler, though multiple seeks per consumer request are still likely. However once we settled on a counter, the jump to directly using the offset seemed natural&mdash;both after all are monotonically increasing integers unique to a partition. Since the offset is hidden from the consumer API this decision is ultimately an implementation detail and we went with the more efficient approach.
 </p>
-<img src="../images/kafka_log.png">
+<img src="images/kafka_log.png">
 <h4>Writes</h4>
 <p>
 The log allows serial appends which always go to the last file. This file is rolled over to a fresh file when it reaches a configurable size (say 1GB). The log takes two configuration parameters: <i>M</i>, which gives the number of messages to write before forcing the OS to flush the file to disk, and <i>S</i>, which gives a number of seconds after which a flush is forced. This gives a durability guarantee of losing at most <i>M</i> messages or <i>S</i> seconds of data in the event of a system crash.
@@ -272,7 +289,7 @@ When an element in a path is denoted [xyz], that means that the value of xyz is
 This is a list of all present broker nodes, each of which provides a unique logical broker id which identifies it to consumers (which must be given as part of its configuration). On startup, a broker node registers itself by creating a znode with the logical broker id under /brokers/ids. The purpose of the logical broker id is to allow a broker to be moved to a different physical machine without affecting consumers. An attempt to register a broker id that is already in use (say because two servers are configured with the same broker id) is an error.
 </p>
 <p>
-Since the broker registers itself in ZooKeeper using ephemeral znodes, this registration is dynamic and will disappear if the broker is shutdown or dies (thus notifying consumers it is no longer available).	
+Since the broker registers itself in ZooKeeper using ephemeral znodes, this registration is dynamic and will disappear if the broker is shutdown or dies (thus notifying consumers it is no longer available).
 </p>
 <h4>Broker Topic Registry</h4>
 <pre>
@@ -289,7 +306,7 @@ Consumers of topics also register themselves in ZooKeeper, in order to coordinat
 </p>
 
 <p>
-Multiple consumers can form a group and jointly consume a single topic. Each consumer in the same group is given a shared group_id. 
+Multiple consumers can form a group and jointly consume a single topic. Each consumer in the same group is given a shared group_id.
 For example if one consumer is your foobar process, which is run across three machines, then you might assign this group of consumers the id "foobar". This group id is provided in the configuration of the consumer, and is your way to tell the consumer which group it belongs to.
 </p>
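 <p>
 As a hedged illustration of wiring up the group id, the sketch below uses the new Java consumer with group.id set to "foobar". The bootstrap server, topic name and deserializer choices are placeholders, not recommendations.
 </p>
 <pre>
 import java.util.Arrays;
 import java.util.Properties;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 
 public class FoobarGroupSketch {
     public static void main(String[] args) {
         Properties props = new Properties();
         props.put("bootstrap.servers", "localhost:9092");  // placeholder
         props.put("group.id", "foobar");                   // the shared group id
         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
         props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
 
         KafkaConsumer&lt;String, String&gt; consumer = new KafkaConsumer&lt;&gt;(props);
         consumer.subscribe(Arrays.asList("foobar-events")); // placeholder topic
         ConsumerRecords&lt;String, String&gt; records = consumer.poll(1000);
         for (ConsumerRecord&lt;String, String&gt; record : records)
             System.out.printf("partition=%d offset=%d value=%s%n",
                               record.partition(), record.offset(), record.value());
         consumer.close();
     }
 }
 </pre>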
 
@@ -354,7 +371,7 @@ The consumer rebalancing algorithms allows all the consumers in a group to come
 Each consumer does the following during rebalancing:
 </p>
 <pre>
-   1. For each topic T that C<sub>i</sub> subscribes to 
+   1. For each topic T that C<sub>i</sub> subscribes to
   2.   let P<sub>T</sub> be all partitions of topic T
    3.   let C<sub>G</sub> be all consumers in the same group as C<sub>i</sub> that consume topic T
    4.   sort P<sub>T</sub> (so partitions on the same broker are clustered together)

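 <p>
 The steps above continue beyond this excerpt, but the range-style idea can be sketched in code: sort the partitions and consumers, split the sorted partition list into contiguous ranges, and hand the i-th range to the i-th consumer. The sketch below is a simplified illustration under those assumptions, not Kafka's implementation.
 </p>
 <pre>
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
 public class RangeAssignmentSketch {
     public static void main(String[] args) {
         // Illustrative inputs: five partitions of one topic, a group of two consumers.
         List&lt;Integer&gt; partitions = Arrays.asList(0, 1, 2, 3, 4);
         List&lt;String&gt; consumers = Arrays.asList("consumer-1", "consumer-2");
         Collections.sort(partitions);  // deterministic ordering, as in step 4 above
         Collections.sort(consumers);
 
         int base = partitions.size() / consumers.size();    // partitions per consumer
         int extra = partitions.size() % consumers.size();   // first 'extra' consumers take one more
         int start = 0;
         for (int i = 0; i &lt; consumers.size(); i++) {
             int count = base + (i &lt; extra ? 1 : 0);
             System.out.println(consumers.get(i) + " -> " + partitions.subList(start, start + count));
             start += count;
         }
     }
 }
 </pre>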
http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/introduction.html
----------------------------------------------------------------------
diff --git a/090/introduction.html b/090/introduction.html
index a182182..7e0b150 100644
--- a/090/introduction.html
+++ b/090/introduction.html
@@ -1,3 +1,20 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+ 
+    http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <h3><a id="introduction">1.1 Introduction</a></h3>
 Kafka is a distributed, partitioned, replicated commit log service. It provides the functionality of a messaging system, but with a unique design.
 <p>
@@ -13,7 +30,7 @@ First let's review some basic messaging terminology:
 
 So, at a high level, producers send messages over the network to the Kafka cluster, which in turn serves them up to consumers like this:
 <div style="text-align: center; width: 100%">
-  <img src="../images/producer_consumer.png">
+  <img src="images/producer_consumer.png">
 </div>
 
 Communication between the clients and the servers is done with a simple, high-performance, language-agnostic <a href="https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol">TCP protocol</a>. We provide a Java client for Kafka, but clients are available in <a href="https://cwiki.apache.org/confluence/display/KAFKA/Clients">many languages</a>.
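 <p>
 For instance, publishing a single message with the Java client looks roughly like the sketch below; the broker address, topic name and serializer settings are placeholders rather than recommendations.
 </p>
 <pre>
 import java.util.Properties;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 
 public class HelloKafkaSketch {
     public static void main(String[] args) {
         Properties props = new Properties();
         props.put("bootstrap.servers", "localhost:9092");  // placeholder broker address
         props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
         props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
 
         Producer&lt;String, String&gt; producer = new KafkaProducer&lt;&gt;(props);
         // Publish one message to a (hypothetical) topic named "my-topic".
         producer.send(new ProducerRecord&lt;&gt;("my-topic", "key", "hello kafka"));
         producer.close();
     }
 }
 </pre>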
@@ -23,7 +40,7 @@ Let's first dive into the high-level abstraction Kafka provides&mdash;the topic.
 <p>
 A topic is a category or feed name to which messages are published. For each topic, the Kafka cluster maintains a partitioned log that looks like this:
 <div style="text-align: center; width: 100%">
-  <img src="../images/log_anatomy.png">
+  <img src="images/log_anatomy.png">
 </div>
 Each partition is an ordered, immutable sequence of messages that is continually appended to&mdash;a commit log. The messages in the partitions are each assigned a sequential id number called the <i>offset</i> that uniquely identifies each message within the partition.
 <p>
@@ -59,7 +76,7 @@ More commonly, however, we have found that topics have a small number of consume
 <p>
 
 <div style="float: right; margin: 20px; width: 500px" class="caption">
-  <img src="../images/consumer-groups.png"><br>
+  <img src="images/consumer-groups.png"><br>
   A two-server Kafka cluster hosting four partitions (P0-P3) with two consumer groups. Consumer group A has two consumer instances and group B has four.
 </div>
 <p>

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/kafka_config.html
----------------------------------------------------------------------
diff --git a/090/kafka_config.html b/090/kafka_config.html
new file mode 100644
index 0000000..2125b6e
--- /dev/null
+++ b/090/kafka_config.html
@@ -0,0 +1,268 @@
+<table>
+<tr>
+<th>Name</th>
+<th>Type</th>
+<th>Default</th>
+<th>Valid Values</th>
+<th>Importance</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>zookeeper.connect</td><td>string</td><td></td><td></td><td>high</td><td>The ZooKeeper connection string, in the form hostname:port (multiple hosts may be given as a comma-separated list)</td></tr>
+<tr>
+<td>advertised.host.name</td><td>string</td><td>null</td><td></td><td>high</td><td>Hostname to publish to ZooKeeper for clients to use. In IaaS environments, this may need to be different from the interface to which the broker binds. If this is not set, it will use the value for "host.name" if configured. Otherwise it will use the value returned from java.net.InetAddress.getCanonicalHostName().</td></tr>
+<tr>
+<td>advertised.listeners</td><td>string</td><td>null</td><td></td><td>high</td><td>Listeners to publish to ZooKeeper for clients to use, if different than the listeners above. In IaaS environments, this may need to be different from the interface to which the broker binds. If this is not set, the value for "listeners" will be used.</td></tr>
+<tr>
+<td>advertised.port</td><td>int</td><td>null</td><td></td><td>high</td><td>The port to publish to ZooKeeper for clients to use. In IaaS environments, this may need to be different from the port to which the broker binds. If this is not set, it will publish the same port that the broker binds to.</td></tr>
+<tr>
+<td>auto.create.topics.enable</td><td>boolean</td><td>true</td><td></td><td>high</td><td>Enable auto creation of topics on the server</td></tr>
+<tr>
+<td>auto.leader.rebalance.enable</td><td>boolean</td><td>true</td><td></td><td>high</td><td>Enables auto leader balancing. A background thread checks and triggers leader balance if required at regular intervals</td></tr>
+<tr>
+<td>background.threads</td><td>int</td><td>10</td><td>[1,...]</td><td>high</td><td>The number of threads to use for various background processing tasks</td></tr>
+<tr>
+<td>broker.id</td><td>int</td><td>-1</td><td></td><td>high</td><td>The broker id for this server. To avoid conflicts between ZooKeeper-generated broker ids and user-configured broker ids, automatically generated broker ids start from reserved.broker.max.id + 1.</td></tr>
+<tr>
+<td>compression.type</td><td>string</td><td>producer</td><td></td><td>high</td><td>Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.</td></tr>
+<tr>
+<td>delete.topic.enable</td><td>boolean</td><td>false</td><td></td><td>high</td><td>Enables topic deletion. Deleting topics through the admin tool will have no effect if this config is turned off</td></tr>
+<tr>
+<td>host.name</td><td>string</td><td>""</td><td></td><td>high</td><td>Hostname of the broker. If this is set, it will only bind to this address. If this is not set, it will bind to all interfaces</td></tr>
+<tr>
+<td>leader.imbalance.check.interval.seconds</td><td>long</td><td>300</td><td></td><td>high</td><td>The frequency with which the partition rebalance check is triggered by the controller</td></tr>
+<tr>
+<td>leader.imbalance.per.broker.percentage</td><td>int</td><td>10</td><td></td><td>high</td><td>The ratio of leader imbalance allowed per broker. The controller will trigger a leader balance if it goes above this value per broker. The value is specified as a percentage.</td></tr>
+<tr>
+<td>listeners</td><td>string</td><td>null</td><td></td><td>high</td><td>Listener List - Comma-separated list of URIs we will listen on and their protocols.
+ Specify hostname as 0.0.0.0 to bind to all interfaces.
+ Leave hostname empty to bind to default interface.
+ Examples of legal listener lists:
+ PLAINTEXT://myhost:9092,TRACE://:9091
+ PLAINTEXT://0.0.0.0:9092, TRACE://localhost:9093
+</td></tr>
+<tr>
+<td>log.dir</td><td>string</td><td>/tmp/kafka-logs</td><td></td><td>high</td><td>The directory in which the log data is kept (supplemental to the log.dirs property)</td></tr>
+<tr>
+<td>log.dirs</td><td>string</td><td>null</td><td></td><td>high</td><td>The directories in which the log data is kept. If not set, the value in log.dir is used</td></tr>
+<tr>
+<td>log.flush.interval.messages</td><td>long</td><td>9223372036854775807</td><td>[1,...]</td><td>high</td><td>The number of messages accumulated on a log partition before messages are flushed to disk </td></tr>
+<tr>
+<td>log.flush.interval.ms</td><td>long</td><td>null</td><td></td><td>high</td><td>The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used</td></tr>
+<tr>
+<td>log.flush.offset.checkpoint.interval.ms</td><td>int</td><td>60000</td><td>[0,...]</td><td>high</td><td>The frequency with which we update the persistent record of the last flush which acts as the log recovery point</td></tr>
+<tr>
+<td>log.flush.scheduler.interval.ms</td><td>long</td><td>9223372036854775807</td><td></td><td>high</td><td>The frequency in ms that the log flusher checks whether any log needs to be flushed to disk</td></tr>
+<tr>
+<td>log.retention.bytes</td><td>long</td><td>-1</td><td></td><td>high</td><td>The maximum size of the log before deleting it</td></tr>
+<tr>
+<td>log.retention.hours</td><td>int</td><td>168</td><td></td><td>high</td><td>The number of hours to keep a log file before deleting it, tertiary to the log.retention.ms property</td></tr>
+<tr>
+<td>log.retention.minutes</td><td>int</td><td>null</td><td></td><td>high</td><td>The number of minutes to keep a log file before deleting it, secondary to the log.retention.ms property. If not set, the value in log.retention.hours is used</td></tr>
+<tr>
+<td>log.retention.ms</td><td>long</td><td>null</td><td></td><td>high</td><td>The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used</td></tr>
+<tr>
+<td>log.roll.hours</td><td>int</td><td>168</td><td>[1,...]</td><td>high</td><td>The maximum time before a new log segment is rolled out (in hours), secondary to log.roll.ms property</td></tr>
+<tr>
+<td>log.roll.jitter.hours</td><td>int</td><td>0</td><td>[0,...]</td><td>high</td><td>The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to log.roll.jitter.ms property</td></tr>
+<tr>
+<td>log.roll.jitter.ms</td><td>long</td><td>null</td><td></td><td>high</td><td>The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used</td></tr>
+<tr>
+<td>log.roll.ms</td><td>long</td><td>null</td><td></td><td>high</td><td>The maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in log.roll.hours is used</td></tr>
+<tr>
+<td>log.segment.bytes</td><td>int</td><td>1073741824</td><td>[14,...]</td><td>high</td><td>The maximum size of a single log file</td></tr>
+<tr>
+<td>log.segment.delete.delay.ms</td><td>long</td><td>60000</td><td>[0,...]</td><td>high</td><td>The amount of time to wait before deleting a file from the filesystem</td></tr>
+<tr>
+<td>message.max.bytes</td><td>int</td><td>1000012</td><td>[0,...]</td><td>high</td><td>The maximum size of a message that the server can receive</td></tr>
+<tr>
+<td>min.insync.replicas</td><td>int</td><td>1</td><td>[1,...]</td><td>high</td><td>The minimum number of replicas in ISR needed to satisfy a produce request with required.acks=-1 (or all)</td></tr>
+<tr>
+<td>num.io.threads</td><td>int</td><td>8</td><td>[1,...]</td><td>high</td><td>The number of I/O threads that the server uses for carrying out network requests</td></tr>
+<tr>
+<td>num.network.threads</td><td>int</td><td>3</td><td>[1,...]</td><td>high</td><td>The number of network threads that the server uses for handling network requests</td></tr>
+<tr>
+<td>num.recovery.threads.per.data.dir</td><td>int</td><td>1</td><td>[1,...]</td><td>high</td><td>The number of threads per data directory to be used for log recovery at startup and flushing at shutdown</td></tr>
+<tr>
+<td>num.replica.fetchers</td><td>int</td><td>1</td><td></td><td>high</td><td>Number of fetcher threads used to replicate messages from a source broker. Increasing this value can increase the degree of I/O parallelism in the follower broker.</td></tr>
+<tr>
+<td>offset.metadata.max.bytes</td><td>int</td><td>4096</td><td></td><td>high</td><td>The maximum size for a metadata entry associated with an offset commit</td></tr>
+<tr>
+<td>offsets.commit.required.acks</td><td>short</td><td>-1</td><td></td><td>high</td><td>The required acks before the commit can be accepted. In general, the default (-1) should not be overridden</td></tr>
+<tr>
+<td>offsets.commit.timeout.ms</td><td>int</td><td>5000</td><td>[1,...]</td><td>high</td><td>Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. This is similar to the producer request timeout.</td></tr>
+<tr>
+<td>offsets.load.buffer.size</td><td>int</td><td>5242880</td><td>[1,...]</td><td>high</td><td>Batch size for reading from the offsets segments when loading offsets into the cache.</td></tr>
+<tr>
+<td>offsets.retention.check.interval.ms</td><td>long</td><td>600000</td><td>[1,...]</td><td>high</td><td>Frequency at which to check for stale offsets</td></tr>
+<tr>
+<td>offsets.retention.minutes</td><td>int</td><td>1440</td><td>[1,...]</td><td>high</td><td>Log retention window in minutes for offsets topic</td></tr>
+<tr>
+<td>offsets.topic.compression.codec</td><td>int</td><td>0</td><td></td><td>high</td><td>Compression codec for the offsets topic - compression may be used to achieve "atomic" commits</td></tr>
+<tr>
+<td>offsets.topic.num.partitions</td><td>int</td><td>50</td><td>[1,...]</td><td>high</td><td>The number of partitions for the offset commit topic (should not change after deployment)</td></tr>
+<tr>
+<td>offsets.topic.replication.factor</td><td>short</td><td>3</td><td>[1,...]</td><td>high</td><td>The replication factor for the offsets topic (set higher to ensure availability). To ensure that the effective replication factor of the offsets topic is the configured value, the number of alive brokers has to be at least the replication factor at the time of the first request for the offsets topic. If not, either the offsets topic creation will fail or it will get a replication factor of min(alive brokers, configured replication factor)</td></tr>
+<tr>
+<td>offsets.topic.segment.bytes</td><td>int</td><td>104857600</td><td>[1,...]</td><td>high</td><td>The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads</td></tr>
+<tr>
+<td>port</td><td>int</td><td>9092</td><td></td><td>high</td><td>The port to listen and accept connections on</td></tr>
+<tr>
+<td>queued.max.requests</td><td>int</td><td>500</td><td>[1,...]</td><td>high</td><td>The number of queued requests allowed before blocking the network threads</td></tr>
+<tr>
+<td>quota.consumer.default</td><td>long</td><td>9223372036854775807</td><td>[1,...]</td><td>high</td><td>Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second</td></tr>
+<tr>
+<td>quota.producer.default</td><td>long</td><td>9223372036854775807</td><td>[1,...]</td><td>high</td><td>Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second</td></tr>
+<tr>
+<td>replica.fetch.max.bytes</td><td>int</td><td>1048576</td><td></td><td>high</td><td>The number of bytes of messages to attempt to fetch</td></tr>
+<tr>
+<td>replica.fetch.min.bytes</td><td>int</td><td>1</td><td></td><td>high</td><td>Minimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs</td></tr>
+<tr>
+<td>replica.fetch.wait.max.ms</td><td>int</td><td>500</td><td></td><td>high</td><td>The maximum wait time for each fetcher request issued by follower replicas. This value should always be less than replica.lag.time.max.ms to prevent frequent shrinking of the ISR for low-throughput topics</td></tr>
+<tr>
+<td>replica.high.watermark.checkpoint.interval.ms</td><td>long</td><td>5000</td><td></td><td>high</td><td>The frequency with which the high watermark is saved out to disk</td></tr>
+<tr>
+<td>replica.lag.time.max.ms</td><td>long</td><td>10000</td><td></td><td>high</td><td>If a follower hasn't sent any fetch requests or hasn't consumed up to the leader's log end offset for at least this time, the leader will remove the follower from the ISR</td></tr>
+<tr>
+<td>replica.socket.receive.buffer.bytes</td><td>int</td><td>65536</td><td></td><td>high</td><td>The socket receive buffer for network requests</td></tr>
+<tr>
+<td>replica.socket.timeout.ms</td><td>int</td><td>30000</td><td></td><td>high</td><td>The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms</td></tr>
+<tr>
+<td>request.timeout.ms</td><td>int</td><td>30000</td><td></td><td>high</td><td>The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.</td></tr>
+<tr>
+<td>socket.receive.buffer.bytes</td><td>int</td><td>102400</td><td></td><td>high</td><td>The SO_RCVBUF buffer of the socket server sockets</td></tr>
+<tr>
+<td>socket.request.max.bytes</td><td>int</td><td>104857600</td><td>[1,...]</td><td>high</td><td>The maximum number of bytes in a socket request</td></tr>
+<tr>
+<td>socket.send.buffer.bytes</td><td>int</td><td>102400</td><td></td><td>high</td><td>The SO_SNDBUF buffer of the socket server sockets</td></tr>
+<tr>
+<td>unclean.leader.election.enable</td><td>boolean</td><td>true</td><td></td><td>high</td><td>Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss</td></tr>
+<tr>
+<td>zookeeper.connection.timeout.ms</td><td>int</td><td>null</td><td></td><td>high</td><td>The max time that the client waits to establish a connection to zookeeper. If not set, the value in zookeeper.session.timeout.ms is used</td></tr>
+<tr>
+<td>zookeeper.session.timeout.ms</td><td>int</td><td>6000</td><td></td><td>high</td><td>Zookeeper session timeout</td></tr>
+<tr>
+<td>zookeeper.set.acl</td><td>boolean</td><td>false</td><td></td><td>high</td><td>Set client to use secure ACLs</td></tr>
+<tr>
+<td>connections.max.idle.ms</td><td>long</td><td>600000</td><td></td><td>medium</td><td>Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this</td></tr>
+<tr>
+<td>controlled.shutdown.enable</td><td>boolean</td><td>true</td><td></td><td>medium</td><td>Enable controlled shutdown of the server</td></tr>
+<tr>
+<td>controlled.shutdown.max.retries</td><td>int</td><td>3</td><td></td><td>medium</td><td>Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens</td></tr>
+<tr>
+<td>controlled.shutdown.retry.backoff.ms</td><td>long</td><td>5000</td><td></td><td>medium</td><td>Before each retry, the system needs time to recover from the state that caused the previous failure (Controller fail over, replica lag etc). This config determines the amount of time to wait before retrying.</td></tr>
+<tr>
+<td>controller.socket.timeout.ms</td><td>int</td><td>30000</td><td></td><td>medium</td><td>The socket timeout for controller-to-broker channels</td></tr>
+<tr>
+<td>default.replication.factor</td><td>int</td><td>1</td><td></td><td>medium</td><td>The default replication factor for automatically created topics</td></tr>
+<tr>
+<td>fetch.purgatory.purge.interval.requests</td><td>int</td><td>1000</td><td></td><td>medium</td><td>The purge interval (in number of requests) of the fetch request purgatory</td></tr>
+<tr>
+<td>group.max.session.timeout.ms</td><td>int</td><td>30000</td><td></td><td>medium</td><td>The maximum allowed session timeout for registered consumers</td></tr>
+<tr>
+<td>group.min.session.timeout.ms</td><td>int</td><td>6000</td><td></td><td>medium</td><td>The minimum allowed session timeout for registered consumers</td></tr>
+<tr>
+<td>inter.broker.protocol.version</td><td>string</td><td>0.9.0.X</td><td></td><td>medium</td><td>Specify which version of the inter-broker protocol will be used.
+ This is typically bumped after all brokers were upgraded to a new version.
+ Examples of valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1. Check ApiVersion for the full list.</td></tr>
+<tr>
+<td>log.cleaner.backoff.ms</td><td>long</td><td>15000</td><td>[0,...]</td><td>medium</td><td>The amount of time to sleep when there are no logs to clean</td></tr>
+<tr>
+<td>log.cleaner.dedupe.buffer.size</td><td>long</td><td>524288000</td><td></td><td>medium</td><td>The total memory used for log deduplication across all cleaner threads</td></tr>
+<tr>
+<td>log.cleaner.delete.retention.ms</td><td>long</td><td>86400000</td><td></td><td>medium</td><td>How long are delete records retained?</td></tr>
+<tr>
+<td>log.cleaner.enable</td><td>boolean</td><td>false</td><td></td><td>medium</td><td>Should we enable log cleaning?</td></tr>
+<tr>
+<td>log.cleaner.io.buffer.load.factor</td><td>double</td><td>0.9</td><td></td><td>medium</td><td>Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value will allow more log to be cleaned at once but will lead to more hash collisions</td></tr>
+<tr>
+<td>log.cleaner.io.buffer.size</td><td>int</td><td>524288</td><td>[0,...]</td><td>medium</td><td>The total memory used for log cleaner I/O buffers across all cleaner threads</td></tr>
+<tr>
+<td>log.cleaner.io.max.bytes.per.second</td><td>double</td><td>1.7976931348623157E308</td><td></td><td>medium</td><td>The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average</td></tr>
+<tr>
+<td>log.cleaner.min.cleanable.ratio</td><td>double</td><td>0.5</td><td></td><td>medium</td><td>The minimum ratio of dirty log to total log for a log to be eligible for cleaning</td></tr>
+<tr>
+<td>log.cleaner.threads</td><td>int</td><td>1</td><td>[0,...]</td><td>medium</td><td>The number of background threads to use for log cleaning</td></tr>
+<tr>
+<td>log.cleanup.policy</td><td>string</td><td>delete</td><td>[compact, delete]</td><td>medium</td><td>The default cleanup policy for segments beyond the retention window, must be either "delete" or "compact"</td></tr>
+<tr>
+<td>log.index.interval.bytes</td><td>int</td><td>4096</td><td>[0,...]</td><td>medium</td><td>The interval with which we add an entry to the offset index</td></tr>
+<tr>
+<td>log.index.size.max.bytes</td><td>int</td><td>10485760</td><td>[4,...]</td><td>medium</td><td>The maximum size in bytes of the offset index</td></tr>
+<tr>
+<td>log.preallocate</td><td>boolean</td><td>false</td><td></td><td>medium</td><td>Should we preallocate the file when creating a new segment? If you are using Kafka on Windows, you probably need to set it to true.</td></tr>
+<tr>
+<td>log.retention.check.interval.ms</td><td>long</td><td>300000</td><td>[1,...]</td><td>medium</td><td>The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion</td></tr>
+<tr>
+<td>max.connections.per.ip</td><td>int</td><td>2147483647</td><td>[1,...]</td><td>medium</td><td>The maximum number of connections we allow from each ip address</td></tr>
+<tr>
+<td>max.connections.per.ip.overrides</td><td>string</td><td>""</td><td></td><td>medium</td><td>Per-ip or hostname overrides to the default maximum number of connections</td></tr>
+<tr>
+<td>num.partitions</td><td>int</td><td>1</td><td>[1,...]</td><td>medium</td><td>The default number of log partitions per topic</td></tr>
+<tr>
+<td>principal.builder.class</td><td>class</td><td>org.apache.kafka.common.security.auth.DefaultPrincipalBuilder</td><td></td><td>medium</td><td>The principal builder class used to generate a Java Principal. This config is optional for clients.</td></tr>
+<tr>
+<td>producer.purgatory.purge.interval.requests</td><td>int</td><td>1000</td><td></td><td>medium</td><td>The purge interval (in number of requests) of the producer request purgatory</td></tr>
+<tr>
+<td>replica.fetch.backoff.ms</td><td>int</td><td>1000</td><td>[0,...]</td><td>medium</td><td>The amount of time to sleep when fetch partition error occurs.</td></tr>
+<tr>
+<td>reserved.broker.max.id</td><td>int</td><td>1000</td><td>[0,...]</td><td>medium</td><td>The maximum broker id that can be used in a user-supplied broker.id; automatically generated broker ids start above this value</td></tr>
+<tr>
+<td>sasl.kerberos.kinit.cmd</td><td>string</td><td>/usr/bin/kinit</td><td></td><td>medium</td><td>Kerberos kinit command path. Default is /usr/bin/kinit</td></tr>
+<tr>
+<td>sasl.kerberos.min.time.before.relogin</td><td>long</td><td>60000</td><td></td><td>medium</td><td>Login thread sleep time between refresh attempts.</td></tr>
+<tr>
+<td>sasl.kerberos.principal.to.local.rules</td><td>list</td><td>[DEFAULT]</td><td></td><td>medium</td><td>A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form &lt;username&gt;/&lt;hostname&gt;@&lt;REALM&gt; are mapped to &lt;username&gt;.</td></tr>
+<tr>
+<td>sasl.kerberos.service.name</td><td>string</td><td>null</td><td></td><td>medium</td><td>The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.</td></tr>
+<tr>
+<td>sasl.kerberos.ticket.renew.jitter</td><td>double</td><td>0.05</td><td></td><td>medium</td><td>Percentage of random jitter added to the renewal time.</td></tr>
+<tr>
+<td>sasl.kerberos.ticket.renew.window.factor</td><td>double</td><td>0.8</td><td></td><td>medium</td><td>Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.</td></tr>
+<tr>
+<td>security.inter.broker.protocol</td><td>string</td><td>PLAINTEXT</td><td></td><td>medium</td><td>Security protocol used to communicate between brokers. Defaults to plain text.</td></tr>
+<tr>
+<td>ssl.cipher.suites</td><td>list</td><td>null</td><td></td><td>medium</td><td>A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported.</td></tr>
+<tr>
+<td>ssl.client.auth</td><td>string</td><td>none</td><td>[required, requested, none]</td><td>medium</td><td>Configures the Kafka broker to request client authentication. The following settings are common:  <ul> <li><code>ssl.client.auth=required</code> If set to required, client authentication is required. <li><code>ssl.client.auth=requested</code> This means client authentication is optional. Unlike required, if this option is set the client can choose not to provide authentication information about itself. <li><code>ssl.client.auth=none</code> This means client authentication is not needed.</ul></td></tr>
+<tr>
+<td>ssl.enabled.protocols</td><td>list</td><td>[TLSv1.2, TLSv1.1, TLSv1]</td><td></td><td>medium</td><td>The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.</td></tr>
+<tr>
+<td>ssl.key.password</td><td>string</td><td>null</td><td></td><td>medium</td><td>The password of the private key in the key store file. This is optional for client.</td></tr>
+<tr>
+<td>ssl.keymanager.algorithm</td><td>string</td><td>SunX509</td><td></td><td>medium</td><td>The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.</td></tr>
+<tr>
+<td>ssl.keystore.location</td><td>string</td><td>null</td><td></td><td>medium</td><td>The location of the key store file. This is optional for client and can be used for two-way authentication for client.</td></tr>
+<tr>
+<td>ssl.keystore.password</td><td>string</td><td>null</td><td></td><td>medium</td><td>The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured.</td></tr>
+<tr>
+<td>ssl.keystore.type</td><td>string</td><td>JKS</td><td></td><td>medium</td><td>The file format of the key store file. This is optional for client. Default value is JKS</td></tr>
+<tr>
+<td>ssl.protocol</td><td>string</td><td>TLS</td><td></td><td>medium</td><td>The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.</td></tr>
+<tr>
+<td>ssl.provider</td><td>string</td><td>null</td><td></td><td>medium</td><td>The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.</td></tr>
+<tr>
+<td>ssl.trustmanager.algorithm</td><td>string</td><td>PKIX</td><td></td><td>medium</td><td>The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.</td></tr>
+<tr>
+<td>ssl.truststore.location</td><td>string</td><td>null</td><td></td><td>medium</td><td>The location of the trust store file. </td></tr>
+<tr>
+<td>ssl.truststore.password</td><td>string</td><td>null</td><td></td><td>medium</td><td>The password for the trust store file. </td></tr>
+<tr>
+<td>ssl.truststore.type</td><td>string</td><td>JKS</td><td></td><td>medium</td><td>The file format of the trust store file. Default value is JKS.</td></tr>
+<tr>
+<td>authorizer.class.name</td><td>string</td><td>""</td><td></td><td>low</td><td>The authorizer class that should be used for authorization</td></tr>
+<tr>
+<td>metric.reporters</td><td>list</td><td>[]</td><td></td><td>low</td><td>A list of classes to use as metrics reporters. Implementing the <code>MetricReporter</code> interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.</td></tr>
+<tr>
+<td>metrics.num.samples</td><td>int</td><td>2</td><td>[1,...]</td><td>low</td><td>The number of samples maintained to compute metrics.</td></tr>
+<tr>
+<td>metrics.sample.window.ms</td><td>long</td><td>30000</td><td>[1,...]</td><td>low</td><td>The window of time over which each metrics sample is computed.</td></tr>
+<tr>
+<td>quota.window.num</td><td>int</td><td>11</td><td>[1,...]</td><td>low</td><td>The number of samples to retain in memory</td></tr>
+<tr>
+<td>quota.window.size.seconds</td><td>int</td><td>1</td><td>[1,...]</td><td>low</td><td>The time span of each sample</td></tr>
+<tr>
+<td>ssl.endpoint.identification.algorithm</td><td>string</td><td>null</td><td></td><td>low</td><td>The endpoint identification algorithm to validate server hostname using server certificate. </td></tr>
+<tr>
+<td>zookeeper.sync.time.ms</td><td>int</td><td>2000</td><td></td><td>low</td><td>How far a ZK follower can be behind a ZK leader</td></tr>
+</table>
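+<p>
+As a purely illustrative footnote to the table above, a minimal standalone broker usually only needs a handful of these settings. The values below are placeholders; in practice they are entries in the broker's properties file, and a java.util.Properties object is used here only to keep the sketch self-contained.
+</p>
+<pre>
+import java.util.Properties;
+
+public class MinimalBrokerConfigSketch {
+    public static void main(String[] args) {
+        Properties props = new Properties();
+        props.put("broker.id", "0");                          // see table: broker.id
+        props.put("listeners", "PLAINTEXT://0.0.0.0:9092");   // see table: listeners
+        props.put("log.dirs", "/var/lib/kafka-logs");         // see table: log.dirs
+        props.put("zookeeper.connect", "localhost:2181");     // see table: zookeeper.connect
+        props.put("num.partitions", "1");                     // see table: num.partitions
+        System.out.println(props);
+    }
+}
+</pre>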

http://git-wip-us.apache.org/repos/asf/kafka-site/blob/8c4a140c/090/migration.html
----------------------------------------------------------------------
diff --git a/090/migration.html b/090/migration.html
index 922415c..18ab6d4 100644
--- a/090/migration.html
+++ b/090/migration.html
@@ -1,3 +1,20 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+ 
+    http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <!--#include virtual="../includes/header.html" -->
 <h2>Migrating from 0.7.x to 0.8</h2>
 
@@ -14,4 +31,4 @@
     <li>Drink.
 </ol>
 
-<!--#include virtual="../includes/footer.html" -->
\ No newline at end of file
+<!--#include virtual="../includes/footer.html" -->