Posted to commits@kafka.apache.org by ju...@apache.org on 2013/01/12 03:26:37 UTC

[9/11] Use uniform convention for naming properties keys; kafka-648; patched by Sriram Subramanian; reviewed by Jun Rao
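
A quick orientation before the hunks: the patch replaces the old ad-hoc key names with dotted, unit-suffixed ones (.bytes, .ms, .messages). The sketch below lists a few of the renames drawn from the hunks that follow; it is illustrative only and not part of the commit.

    object RenamedKeysSketch {
      // A sample of the renames this patch applies, old key -> new key
      // (taken from the hunks below; the list is not exhaustive).
      val renamed = Seq(
        "brokerid"                    -> "broker.id",
        "groupid"                     -> "group.id",
        "queue.size"                  -> "queue.buffering.max.messages",
        "batch.size"                  -> "batch.num.messages",
        "buffer.size"                 -> "send.buffer.bytes",
        "producer.request.timeout.ms" -> "request.timeout.ms",
        "log.file.size"               -> "log.segment.bytes",
        "log.flush.interval"          -> "log.flush.interval.messages",
        "zk.connectiontimeout.ms"     -> "zk.connection.timeout.ms"
      )

      def main(args: Array[String]): Unit =
        renamed.foreach { case (oldKey, newKey) => println(s"$oldKey -> $newKey") }
    }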

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/log/LogTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/log/LogTest.scala b/core/src/test/scala/unit/kafka/log/LogTest.scala
index 900d0e2..786ae03 100644
--- a/core/src/test/scala/unit/kafka/log/LogTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogTest.scala
@@ -62,7 +62,7 @@ class LogTest extends JUnitSuite {
     val time: MockTime = new MockTime()
 
     // create a log
-    val log = new Log(logDir, 1000, config.maxMessageSize, 1000, rollMs, needsRecovery = false, time = time)
+    val log = new Log(logDir, 1000, config.messageMaxBytes, 1000, rollMs, needsRecovery = false, time = time)
     time.currentMs += rollMs + 1
 
     // segment age is less than its limit
@@ -96,7 +96,7 @@ class LogTest extends JUnitSuite {
     val logFileSize = msgPerSeg * (setSize - 1) // each segment will be 10 messages
 
     // create a log
-    val log = new Log(logDir, logFileSize, config.maxMessageSize, 1000, 10000, needsRecovery = false, time = time)
+    val log = new Log(logDir, logFileSize, config.messageMaxBytes, 1000, 10000, needsRecovery = false, time = time)
     assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
 
     // segments expire in size
@@ -109,12 +109,12 @@ class LogTest extends JUnitSuite {
   @Test
   def testLoadEmptyLog() {
     createEmptyLogs(logDir, 0)
-    new Log(logDir, 1024, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
+    new Log(logDir, 1024, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
   }
 
   @Test
   def testAppendAndRead() {
-    val log = new Log(logDir, 1024, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
+    val log = new Log(logDir, 1024, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
     val message = new Message(Integer.toString(42).getBytes())
     for(i <- 0 until 10)
       log.append(new ByteBufferMessageSet(NoCompressionCodec, message))
@@ -131,7 +131,7 @@ class LogTest extends JUnitSuite {
   @Test
   def testReadOutOfRange() {
     createEmptyLogs(logDir, 1024)
-    val log = new Log(logDir, 1024, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
+    val log = new Log(logDir, 1024, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
     assertEquals("Reading just beyond end of log should produce 0 byte read.", 0, log.read(1024, 1000).sizeInBytes)
     try {
       log.read(0, 1024)
@@ -151,7 +151,7 @@ class LogTest extends JUnitSuite {
   @Test
   def testLogRolls() {
     /* create a multipart log with 100 messages */
-    val log = new Log(logDir, 100, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
+    val log = new Log(logDir, 100, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
     val numMessages = 100
     val messageSets = (0 until numMessages).map(i => TestUtils.singleMessageSet(i.toString.getBytes))
     val offsets = messageSets.map(log.append(_)._1)
@@ -173,7 +173,7 @@ class LogTest extends JUnitSuite {
   @Test
   def testCompressedMessages() {
     /* this log should roll after every messageset */
-    val log = new Log(logDir, 10, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
+    val log = new Log(logDir, 10, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
     
     /* append 2 compressed message sets, each with two messages giving offsets 0, 1, 2, 3 */
     log.append(new ByteBufferMessageSet(DefaultCompressionCodec, new Message("hello".getBytes), new Message("there".getBytes)))
@@ -206,7 +206,7 @@ class LogTest extends JUnitSuite {
   @Test
   def testEdgeLogRollsStartingAtZero() {
     // first test a log segment starting at 0
-    val log = new Log(logDir, 100, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
+    val log = new Log(logDir, 100, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
     val curOffset = log.logEndOffset
     assertEquals(curOffset, 0)
 
@@ -221,7 +221,7 @@ class LogTest extends JUnitSuite {
   @Test
   def testEdgeLogRollsStartingAtNonZero() {
     // second test an empty log segment starting at non-zero
-    val log = new Log(logDir, 100, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
+    val log = new Log(logDir, 100, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, time = time)
     val numMessages = 1
     for(i <- 0 until numMessages)
       log.append(TestUtils.singleMessageSet(i.toString.getBytes))
@@ -269,7 +269,7 @@ class LogTest extends JUnitSuite {
     val messageSize = 100
     val segmentSize = 7 * messageSize
     val indexInterval = 3 * messageSize
-    var log = new Log(logDir, segmentSize, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, indexIntervalBytes = indexInterval, maxIndexSize = 4096)
+    var log = new Log(logDir, segmentSize, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, indexIntervalBytes = indexInterval, maxIndexSize = 4096)
     for(i <- 0 until numMessages)
       log.append(TestUtils.singleMessageSet(TestUtils.randomBytes(messageSize)))
     assertEquals("After appending %d messages to an empty log, the log end offset should be %d".format(numMessages, numMessages), numMessages, log.logEndOffset)
@@ -278,14 +278,14 @@ class LogTest extends JUnitSuite {
     log.close()
     
     // test non-recovery case
-    log = new Log(logDir, segmentSize, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, indexIntervalBytes = indexInterval, maxIndexSize = 4096)
+    log = new Log(logDir, segmentSize, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = false, indexIntervalBytes = indexInterval, maxIndexSize = 4096)
     assertEquals("Should have %d messages when log is reopened w/o recovery".format(numMessages), numMessages, log.logEndOffset)
     assertEquals("Should have same last index offset as before.", lastIndexOffset, log.segments.view.last.index.lastOffset)
     assertEquals("Should have same number of index entries as before.", numIndexEntries, log.segments.view.last.index.entries)
     log.close()
     
     // test 
-    log = new Log(logDir, segmentSize, config.maxMessageSize, 1000, config.logRollHours*60*60*1000L, needsRecovery = true, indexIntervalBytes = indexInterval, maxIndexSize = 4096)
+    log = new Log(logDir, segmentSize, config.messageMaxBytes, 1000, config.logRollHours*60*60*1000L, needsRecovery = true, indexIntervalBytes = indexInterval, maxIndexSize = 4096)
     assertEquals("Should have %d messages when log is reopened with recovery".format(numMessages), numMessages, log.logEndOffset)
     assertEquals("Should have same last index offset as before.", lastIndexOffset, log.segments.view.last.index.lastOffset)
     assertEquals("Should have same number of index entries as before.", numIndexEntries, log.segments.view.last.index.entries)
@@ -300,7 +300,7 @@ class LogTest extends JUnitSuite {
     val logFileSize = msgPerSeg * (setSize - 1) // each segment will be 10 messages
 
     // create a log
-    val log = new Log(logDir, logFileSize, config.maxMessageSize, 1000, 10000, needsRecovery = false, time = time)
+    val log = new Log(logDir, logFileSize, config.messageMaxBytes, 1000, 10000, needsRecovery = false, time = time)
     assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
 
     for (i<- 1 to msgPerSeg)
@@ -349,7 +349,7 @@ class LogTest extends JUnitSuite {
     val setSize = set.sizeInBytes
     val msgPerSeg = 10
     val logFileSize = msgPerSeg * (setSize - 1) // each segment will be 10 messages
-    val log = new Log(logDir, logFileSize, config.maxMessageSize, 1000, 10000, needsRecovery = false, time = time)
+    val log = new Log(logDir, logFileSize, config.messageMaxBytes, 1000, 10000, needsRecovery = false, time = time)
     assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
     for (i<- 1 to msgPerSeg)
       log.append(set)
@@ -373,7 +373,7 @@ class LogTest extends JUnitSuite {
       logDir.mkdir()
       var log = new Log(logDir, 
                         maxLogFileSize = 64*1024, 
-                        maxMessageSize = config.maxMessageSize, 
+                        maxMessageSize = config.messageMaxBytes,
                         maxIndexSize = 1000, 
                         indexIntervalBytes = 10000, 
                         needsRecovery = true)
@@ -403,7 +403,7 @@ class LogTest extends JUnitSuite {
     val set = TestUtils.singleMessageSet("test".getBytes())
     val log = new Log(logDir, 
                       maxLogFileSize = set.sizeInBytes * 5, 
-                      maxMessageSize = config.maxMessageSize,
+                      maxMessageSize = config.messageMaxBytes,
                       maxIndexSize = 1000, 
                       indexIntervalBytes = 1, 
                       needsRecovery = false)
@@ -425,7 +425,7 @@ class LogTest extends JUnitSuite {
     // create a log
     var log = new Log(logDir, 
                       maxLogFileSize = set.sizeInBytes * 5, 
-                      maxMessageSize = config.maxMessageSize, 
+                      maxMessageSize = config.messageMaxBytes,
                       maxIndexSize = 1000, 
                       indexIntervalBytes = 10000, 
                       needsRecovery = true)
@@ -436,7 +436,7 @@ class LogTest extends JUnitSuite {
     log.close()
     log = new Log(logDir, 
                   maxLogFileSize = set.sizeInBytes * 5, 
-                  maxMessageSize = config.maxMessageSize, 
+                  maxMessageSize = config.messageMaxBytes,
                   maxIndexSize = 1000, 
                   indexIntervalBytes = 10000, 
                   needsRecovery = true)

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala b/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala
index 3e46dd7..beb63a4 100644
--- a/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala
+++ b/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala
@@ -63,8 +63,8 @@ class AsyncProducerTest extends JUnit3Suite {
     props.put("serializer.class", "kafka.serializer.StringEncoder")
     props.put("broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
     props.put("producer.type", "async")
-    props.put("queue.size", "10")
-    props.put("batch.size", "1")
+    props.put("queue.buffering.max.messages", "10")
+    props.put("batch.num.messages", "1")
 
     val config = new ProducerConfig(props)
     val produceData = getProduceData(12)
@@ -87,7 +87,7 @@ class AsyncProducerTest extends JUnit3Suite {
     props.put("serializer.class", "kafka.serializer.StringEncoder")
     props.put("broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
     props.put("producer.type", "async")
-    props.put("batch.size", "1")
+    props.put("batch.num.messages", "1")
 
     val config = new ProducerConfig(props)
     val produceData = getProduceData(10)
@@ -358,7 +358,7 @@ class AsyncProducerTest extends JUnit3Suite {
     val props = new Properties()
     props.put("serializer.class", "kafka.serializer.StringEncoder")
     props.put("producer.type", "async")
-    props.put("batch.size", "5")
+    props.put("batch.num.messages", "5")
     props.put("broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
 
     val config = new ProducerConfig(props)

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/producer/ProducerTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/producer/ProducerTest.scala b/core/src/test/scala/unit/kafka/producer/ProducerTest.scala
index 48842eb..792919b 100644
--- a/core/src/test/scala/unit/kafka/producer/ProducerTest.scala
+++ b/core/src/test/scala/unit/kafka/producer/ProducerTest.scala
@@ -140,13 +140,13 @@ class ProducerTest extends JUnit3Suite with ZooKeeperTestHarness with Logging{
     props1.put("serializer.class", "kafka.serializer.StringEncoder")
     props1.put("partitioner.class", "kafka.utils.StaticPartitioner")
     props1.put("broker.list", TestUtils.getBrokerListStrFromConfigs(Seq(config1, config2)))
-    props1.put("producer.request.required.acks", "2")
-    props1.put("producer.request.timeout.ms", "1000")
+    props1.put("request.required.acks", "2")
+    props1.put("request.timeout.ms", "1000")
 
     val props2 = new util.Properties()
     props2.putAll(props1)
-    props2.put("producer.request.required.acks", "3")
-    props2.put("producer.request.timeout.ms", "1000")
+    props2.put("request.required.acks", "3")
+    props2.put("request.timeout.ms", "1000")
 
     val producerConfig1 = new ProducerConfig(props1)
     val producerConfig2 = new ProducerConfig(props2)
@@ -198,8 +198,8 @@ class ProducerTest extends JUnit3Suite with ZooKeeperTestHarness with Logging{
     val props = new Properties()
     props.put("serializer.class", "kafka.serializer.StringEncoder")
     props.put("partitioner.class", "kafka.utils.StaticPartitioner")
-    props.put("producer.request.timeout.ms", "2000")
-//    props.put("producer.request.required.acks", "-1")
+    props.put("request.timeout.ms", "2000")
+//    props.put("request.required.acks", "-1")
     props.put("broker.list", TestUtils.getBrokerListStrFromConfigs(Seq(config1, config2)))
 
     // create topic
@@ -256,7 +256,7 @@ class ProducerTest extends JUnit3Suite with ZooKeeperTestHarness with Logging{
     val props = new Properties()
     props.put("serializer.class", "kafka.serializer.StringEncoder")
     props.put("partitioner.class", "kafka.utils.StaticPartitioner")
-    props.put("producer.request.timeout.ms", String.valueOf(timeoutMs))
+    props.put("request.timeout.ms", String.valueOf(timeoutMs))
     props.put("broker.list", TestUtils.getBrokerListStrFromConfigs(Seq(config1, config2)))
 
     val config = new ProducerConfig(props)
@@ -300,7 +300,7 @@ class ProducerTest extends JUnit3Suite with ZooKeeperTestHarness with Logging{
 
     // make sure we don't wait fewer than numRetries*timeoutMs milliseconds
     // we do this because the DefaultEventHandler retries a number of times
-    assertTrue((t2-t1) >= timeoutMs*config.producerRetries)
+    assertTrue((t2-t1) >= timeoutMs*config.messageSendMaxRetries)
   }
 }
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala b/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala
index b289dda..89ba944 100644
--- a/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala
+++ b/core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala
@@ -41,7 +41,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness {
     val props = new Properties()
     props.put("host", "localhost")
     props.put("port", server.socketServer.port.toString)
-    props.put("buffer.size", "102400")
+    props.put("send.buffer.bytes", "102400")
     props.put("connect.timeout.ms", "500")
     props.put("reconnect.interval", "1000")
     val producer = new SyncProducer(new SyncProducerConfig(props))
@@ -77,10 +77,9 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness {
     val props = new Properties()
     props.put("host", "localhost")
     props.put("port", server.socketServer.port.toString)
-    props.put("buffer.size", "102400")
+    props.put("send.buffer.bytes", "102400")
     props.put("connect.timeout.ms", "300")
     props.put("reconnect.interval", "500")
-    props.put("max.message.size", "100")
     val correlationId = 0
     val clientId = SyncProducerConfig.DefaultClientId
     val ackTimeoutMs = SyncProducerConfig.DefaultAckTimeoutMs
@@ -98,12 +97,11 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness {
     val props = new Properties()
     props.put("host", "localhost")
     props.put("port", server.socketServer.port.toString)
-    props.put("max.message.size", 50000.toString)
     val producer = new SyncProducer(new SyncProducerConfig(props))
     CreateTopicCommand.createTopic(zkClient, "test", 1, 1)
     TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, "test", 0, 500)
 
-    val message1 = new Message(new Array[Byte](configs(0).maxMessageSize + 1))
+    val message1 = new Message(new Array[Byte](configs(0).messageMaxBytes + 1))
     val messageSet1 = new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, messages = message1)
     val response1 = producer.send(TestUtils.produceRequest("test", 0, messageSet1))
 
@@ -111,7 +109,7 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness {
     Assert.assertEquals(ErrorMapping.MessageSizeTooLargeCode, response1.status(TopicAndPartition("test", 0)).error)
     Assert.assertEquals(-1L, response1.status(TopicAndPartition("test", 0)).offset)
 
-    val safeSize = configs(0).maxMessageSize - Message.MessageOverhead - MessageSet.LogOverhead - 1
+    val safeSize = configs(0).messageMaxBytes - Message.MessageOverhead - MessageSet.LogOverhead - 1
     val message2 = new Message(new Array[Byte](safeSize))
     val messageSet2 = new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, messages = message2)
     val response2 = producer.send(TestUtils.produceRequest("test", 0, messageSet2))
@@ -127,10 +125,9 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness {
     val props = new Properties()
     props.put("host", "localhost")
     props.put("port", server.socketServer.port.toString)
-    props.put("buffer.size", "102400")
+    props.put("send.buffer.bytes", "102400")
     props.put("connect.timeout.ms", "300")
     props.put("reconnect.interval", "500")
-    props.put("max.message.size", "100")
 
     val producer = new SyncProducer(new SyncProducerConfig(props))
     val messages = new ByteBufferMessageSet(NoCompressionCodec, new Message(messageBytes))
@@ -179,8 +176,8 @@ class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness {
     val props = new Properties()
     props.put("host", "localhost")
     props.put("port", server.socketServer.port.toString)
-    props.put("buffer.size", "102400")
-    props.put("producer.request.timeout.ms", String.valueOf(timeoutMs))
+    props.put("send.buffer.bytes", "102400")
+    props.put("request.timeout.ms", String.valueOf(timeoutMs))
     val producer = new SyncProducer(new SyncProducerConfig(props))
 
     val messages = new ByteBufferMessageSet(NoCompressionCodec, new Message(messageBytes))

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/server/ISRExpirationTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/server/ISRExpirationTest.scala b/core/src/test/scala/unit/kafka/server/ISRExpirationTest.scala
index 3dfb406..6184f42 100644
--- a/core/src/test/scala/unit/kafka/server/ISRExpirationTest.scala
+++ b/core/src/test/scala/unit/kafka/server/ISRExpirationTest.scala
@@ -29,8 +29,8 @@ class IsrExpirationTest extends JUnit3Suite {
 
   var topicPartitionIsr: Map[(String, Int), Seq[Int]] = new HashMap[(String, Int), Seq[Int]]()
   val configs = TestUtils.createBrokerConfigs(2).map(new KafkaConfig(_) {
-    override val replicaMaxLagTimeMs = 100L
-    override val replicaMaxLagBytes = 10L
+    override val replicaLagTimeMaxMs = 100L
+    override val replicaLagMaxMessages = 10L
   })
   val topic = "foo"
 
@@ -45,7 +45,7 @@ class IsrExpirationTest extends JUnit3Suite {
 
     // let the follower catch up to 10
     (partition0.assignedReplicas() - leaderReplica).foreach(r => r.logEndOffset = 10)
-    var partition0OSR = partition0.getOutOfSyncReplicas(leaderReplica, configs.head.replicaMaxLagTimeMs, configs.head.replicaMaxLagBytes)
+    var partition0OSR = partition0.getOutOfSyncReplicas(leaderReplica, configs.head.replicaLagTimeMaxMs, configs.head.replicaLagMaxMessages)
     assertEquals("No replica should be out of sync", Set.empty[Int], partition0OSR.map(_.brokerId))
 
     // let some time pass
@@ -53,7 +53,7 @@ class IsrExpirationTest extends JUnit3Suite {
 
     // now follower (broker id 1) has caught up to only 10, while the leader is at 15 AND the follower hasn't
     // pulled any data for > replicaMaxLagTimeMs ms. So it is stuck
-    partition0OSR = partition0.getOutOfSyncReplicas(leaderReplica, configs.head.replicaMaxLagTimeMs, configs.head.replicaMaxLagBytes)
+    partition0OSR = partition0.getOutOfSyncReplicas(leaderReplica, configs.head.replicaLagTimeMaxMs, configs.head.replicaLagMaxMessages)
     assertEquals("Replica 1 should be out of sync", Set(configs.last.brokerId), partition0OSR.map(_.brokerId))
     EasyMock.verify(log)
   }
@@ -71,7 +71,7 @@ class IsrExpirationTest extends JUnit3Suite {
 
     // now follower (broker id 1) has caught up to only 4, while the leader is at 15. Since the gap is larger than
     // replicaMaxLagBytes, the follower is out of sync.
-    val partition0OSR = partition0.getOutOfSyncReplicas(leaderReplica, configs.head.replicaMaxLagTimeMs, configs.head.replicaMaxLagBytes)
+    val partition0OSR = partition0.getOutOfSyncReplicas(leaderReplica, configs.head.replicaLagTimeMaxMs, configs.head.replicaLagMaxMessages)
     assertEquals("Replica 1 should be out of sync", Set(configs.last.brokerId), partition0OSR.map(_.brokerId))
 
     EasyMock.verify(log)
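
The two scenarios asserted above correspond to the stuck-follower and slow-follower conditions described in the comments; below is a simplified sketch of that rule under the renamed settings, not the actual Partition.getOutOfSyncReplicas implementation.

    object IsrLagSketch {
      // A follower falls out of sync when it has fetched nothing for longer than
      // replicaLagTimeMaxMs, or has fallen more than replicaLagMaxMessages behind
      // the leader's log end offset (simplified, illustrative only).
      def isOutOfSync(leaderEndOffset: Long,
                      followerEndOffset: Long,
                      msSinceLastFetch: Long,
                      replicaLagTimeMaxMs: Long,
                      replicaLagMaxMessages: Long): Boolean =
        msSinceLastFetch > replicaLagTimeMaxMs ||
          (leaderEndOffset - followerEndOffset) > replicaLagMaxMessages

      def main(args: Array[String]): Unit = {
        // Mirrors the tests: lag limits of 100 ms and 10 messages.
        println(isOutOfSync(15, 10, 150, 100, 10)) // true: no fetch for more than 100 ms
        println(isOutOfSync(15, 4, 50, 100, 10))   // true: 11 messages behind, more than 10
      }
    }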

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
index a3afa2d..cd724a3 100644
--- a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
+++ b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
@@ -28,9 +28,9 @@ import kafka.producer.{ProducerConfig, KeyedMessage, Producer}
 class LogRecoveryTest extends JUnit3Suite with ZooKeeperTestHarness {
 
   val configs = TestUtils.createBrokerConfigs(2).map(new KafkaConfig(_) {
-    override val replicaMaxLagTimeMs = 5000L
-    override val replicaMaxLagBytes = 10L
-    override val replicaMinBytes = 20
+    override val replicaLagTimeMaxMs = 5000L
+    override val replicaLagMaxMessages = 10L
+    override val replicaFetchMinBytes = 20
   })
   val topic = "new-topic"
   val partitionId = 0
@@ -50,7 +50,7 @@ class LogRecoveryTest extends JUnit3Suite with ZooKeeperTestHarness {
   
   val producerProps = getProducerConfig(TestUtils.getBrokerListStrFromConfigs(configs), 64*1024, 100000, 10000)
   producerProps.put("key.serializer.class", classOf[IntEncoder].getName.toString)
-  producerProps.put("producer.request.required.acks", "-1")
+  producerProps.put("request.required.acks", "-1")
 
 
   def testHWCheckpointNoFailuresSingleLogSegment {

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/server/SimpleFetchTest.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/server/SimpleFetchTest.scala b/core/src/test/scala/unit/kafka/server/SimpleFetchTest.scala
index 0377e08..1557047 100644
--- a/core/src/test/scala/unit/kafka/server/SimpleFetchTest.scala
+++ b/core/src/test/scala/unit/kafka/server/SimpleFetchTest.scala
@@ -33,8 +33,8 @@ import kafka.common.TopicAndPartition
 class SimpleFetchTest extends JUnit3Suite {
 
   val configs = TestUtils.createBrokerConfigs(2).map(new KafkaConfig(_) {
-    override val replicaMaxLagTimeMs = 100L
-    override val replicaMaxLagBytes = 10L
+    override val replicaLagTimeMaxMs = 100L
+    override val replicaLagMaxMessages = 10L
   })
   val topic = "foo"
   val partitionId = 0

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/core/src/test/scala/unit/kafka/utils/TestUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
index a508895..9400328 100644
--- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala
+++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
@@ -123,11 +123,11 @@ object TestUtils extends Logging {
    */
   def createBrokerConfig(nodeId: Int, port: Int): Properties = {
     val props = new Properties
-    props.put("brokerid", nodeId.toString)
-    props.put("hostname", "localhost")
+    props.put("broker.id", nodeId.toString)
+    props.put("host.name", "localhost")
     props.put("port", port.toString)
     props.put("log.dir", TestUtils.tempDir().getAbsolutePath)
-    props.put("log.flush.interval", "1")
+    props.put("log.flush.interval.messages", "1")
     props.put("zk.connect", TestZKUtils.zookeeperConnect)
     props.put("replica.socket.timeout.ms", "1500")
     props
@@ -140,13 +140,13 @@ object TestUtils extends Logging {
                                consumerTimeout: Long = -1): Properties = {
     val props = new Properties
     props.put("zk.connect", zkConnect)
-    props.put("groupid", groupId)
-    props.put("consumerid", consumerId)
+    props.put("group.id", groupId)
+    props.put("consumer.id", consumerId)
     props.put("consumer.timeout.ms", consumerTimeout.toString)
-    props.put("zk.sessiontimeout.ms", "400")
-    props.put("zk.synctime.ms", "200")
-    props.put("autocommit.interval.ms", "1000")
-    props.put("rebalance.retries.max", "4")
+    props.put("zk.session.timeout.ms", "400")
+    props.put("zk.sync.time.ms", "200")
+    props.put("auto.commit.interval.ms", "1000")
+    props.put("rebalance.max.retries", "4")
 
     props
   }
@@ -293,7 +293,7 @@ object TestUtils extends Logging {
                            keyEncoder: Encoder[K] = new DefaultEncoder()): Producer[K, V] = {
     val props = new Properties()
     props.put("broker.list", brokerList)
-    props.put("buffer.size", "65536")
+    props.put("send.buffer.bytes", "65536")
     props.put("connect.timeout.ms", "100000")
     props.put("reconnect.interval", "10000")
     props.put("serializer.class", encoder.getClass.getCanonicalName)
@@ -307,10 +307,10 @@ object TestUtils extends Logging {
     props.put("producer.type", "sync")
     props.put("broker.list", brokerList)
     props.put("partitioner.class", "kafka.utils.FixedValuePartitioner")
-    props.put("buffer.size", bufferSize.toString)
+    props.put("send.buffer.bytes", bufferSize.toString)
     props.put("connect.timeout.ms", connectTimeout.toString)
     props.put("reconnect.interval", reconnectInterval.toString)
-    props.put("producer.request.timeout.ms", 30000.toString)
+    props.put("request.timeout.ms", 30000.toString)
     props.put("serializer.class", classOf[StringEncoder].getName.toString)
     props
   }

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/examples/src/main/java/kafka/examples/Consumer.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/kafka/examples/Consumer.java b/examples/src/main/java/kafka/examples/Consumer.java
index 2b87560..3460d36 100644
--- a/examples/src/main/java/kafka/examples/Consumer.java
+++ b/examples/src/main/java/kafka/examples/Consumer.java
@@ -44,10 +44,10 @@ public class Consumer extends Thread
   {
     Properties props = new Properties();
     props.put("zk.connect", KafkaProperties.zkConnect);
-    props.put("groupid", KafkaProperties.groupId);
-    props.put("zk.sessiontimeout.ms", "400");
-    props.put("zk.synctime.ms", "200");
-    props.put("autocommit.interval.ms", "1000");
+    props.put("group.id", KafkaProperties.groupId);
+    props.put("zk.session.timeout.ms", "400");
+    props.put("zk.sync.time.ms", "200");
+    props.put("auto.commit.interval.ms", "1000");
 
     return new ConsumerConfig(props);
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/perf/src/main/scala/kafka/perf/ConsumerPerformance.scala
----------------------------------------------------------------------
diff --git a/perf/src/main/scala/kafka/perf/ConsumerPerformance.scala b/perf/src/main/scala/kafka/perf/ConsumerPerformance.scala
index a720ced..ee2ce95 100644
--- a/perf/src/main/scala/kafka/perf/ConsumerPerformance.scala
+++ b/perf/src/main/scala/kafka/perf/ConsumerPerformance.scala
@@ -74,7 +74,7 @@ object ConsumerPerformance {
     if(!config.showDetailedStats) {
       val totalMBRead = (totalBytesRead.get*1.0)/(1024*1024)
       println(("%s, %s, %d, %.4f, %.4f, %d, %.4f").format(config.dateFormat.format(startMs), config.dateFormat.format(endMs),
-        config.consumerConfig.fetchSize, totalMBRead, totalMBRead/elapsedSecs, totalMessagesRead.get,
+        config.consumerConfig.fetchMessageMaxBytes, totalMBRead, totalMBRead/elapsedSecs, totalMessagesRead.get,
         totalMessagesRead.get/elapsedSecs))
     }
     System.exit(0)
@@ -124,10 +124,10 @@ object ConsumerPerformance {
     }
 
     val props = new Properties
-    props.put("groupid", options.valueOf(groupIdOpt))
-    props.put("socket.buffer.size", options.valueOf(socketBufferSizeOpt).toString)
-    props.put("fetch.size", options.valueOf(fetchSizeOpt).toString)
-    props.put("autooffset.reset", if(options.has(resetBeginningOffsetOpt)) "largest" else "smallest")
+    props.put("group.id", options.valueOf(groupIdOpt))
+    props.put("socket.receive.buffer.bytes", options.valueOf(socketBufferSizeOpt).toString)
+    props.put("fetch.message.max.bytes", options.valueOf(fetchSizeOpt).toString)
+    props.put("auto.offset.reset", if(options.has(resetBeginningOffsetOpt)) "largest" else "smallest")
     props.put("zk.connect", options.valueOf(zkConnectOpt))
     props.put("consumer.timeout.ms", "5000")
     val consumerConfig = new ConsumerConfig(props)
@@ -190,7 +190,7 @@ object ConsumerPerformance {
       val totalMBRead = (bytesRead*1.0)/(1024*1024)
       val mbRead = ((bytesRead - lastBytesRead)*1.0)/(1024*1024)
       println(("%s, %d, %d, %.4f, %.4f, %d, %.4f").format(config.dateFormat.format(endMs), id,
-        config.consumerConfig.fetchSize, totalMBRead,
+        config.consumerConfig.fetchMessageMaxBytes, totalMBRead,
         1000.0*(mbRead/elapsedMs), messagesRead, ((messagesRead - lastMessagesRead)/elapsedMs)*1000.0))
     }
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/perf/src/main/scala/kafka/perf/ProducerPerformance.scala
----------------------------------------------------------------------
diff --git a/perf/src/main/scala/kafka/perf/ProducerPerformance.scala b/perf/src/main/scala/kafka/perf/ProducerPerformance.scala
index 0367af2..4822a7e 100644
--- a/perf/src/main/scala/kafka/perf/ProducerPerformance.scala
+++ b/perf/src/main/scala/kafka/perf/ProducerPerformance.scala
@@ -191,17 +191,17 @@ object ProducerPerformance extends Logging {
     props.put("broker.list", config.brokerList)
     props.put("compression.codec", config.compressionCodec.codec.toString)
     props.put("reconnect.interval", Integer.MAX_VALUE.toString)
-    props.put("buffer.size", (64*1024).toString)
+    props.put("send.buffer.bytes", (64*1024).toString)
     if(!config.isSync) {
       props.put("producer.type","async")
-      props.put("batch.size", config.batchSize.toString)
-      props.put("queue.enqueueTimeout.ms", "-1")
+      props.put("batch.num.messages", config.batchSize.toString)
+      props.put("queue.enqueue.timeout.ms", "-1")
     }
-    props.put("clientid", "ProducerPerformance")
-    props.put("producer.request.required.acks", config.producerRequestRequiredAcks.toString)
-    props.put("producer.request.timeout.ms", config.producerRequestTimeoutMs.toString)
-    props.put("producer.num.retries", config.producerNumRetries.toString)
-    props.put("producer.retry.backoff.ms", config.producerRetryBackoffMs.toString)
+    props.put("client.id", "ProducerPerformance")
+    props.put("request.required.acks", config.producerRequestRequiredAcks.toString)
+    props.put("request.timeout.ms", config.producerRequestTimeoutMs.toString)
+    props.put("message.send.max.retries", config.producerNumRetries.toString)
+    props.put("retry.backoff.ms", config.producerRetryBackoffMs.toString)
     props.put("serializer.class", classOf[DefaultEncoder].getName.toString)
     props.put("key.serializer.class", classOf[NullEncoder[Long]].getName.toString)
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/mirror_producer.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer.properties b/system_test/broker_failure/config/mirror_producer.properties
index 9ea68d0..7f80a1e 100644
--- a/system_test/broker_failure/config/mirror_producer.properties
+++ b/system_test/broker_failure/config/mirror_producer.properties
@@ -18,10 +18,10 @@
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 producer.type=async
 
 # to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueueTimeout.ms=-1
+queue.enqueue.timeout.ms=-1
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/mirror_producer1.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer1.properties b/system_test/broker_failure/config/mirror_producer1.properties
index 7f37db3..81dae76 100644
--- a/system_test/broker_failure/config/mirror_producer1.properties
+++ b/system_test/broker_failure/config/mirror_producer1.properties
@@ -19,10 +19,10 @@
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 producer.type=async
 
 # to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueueTimeout.ms=-1
+queue.enqueue.timeout.ms=-1
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/mirror_producer2.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer2.properties b/system_test/broker_failure/config/mirror_producer2.properties
index 047f840..714b95d 100644
--- a/system_test/broker_failure/config/mirror_producer2.properties
+++ b/system_test/broker_failure/config/mirror_producer2.properties
@@ -19,10 +19,10 @@
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 producer.type=async
 
 # to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueueTimeout.ms=-1
+queue.enqueue.timeout.ms=-1
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/mirror_producer3.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer3.properties b/system_test/broker_failure/config/mirror_producer3.properties
index 5e8b7dc..e8fa72d 100644
--- a/system_test/broker_failure/config/mirror_producer3.properties
+++ b/system_test/broker_failure/config/mirror_producer3.properties
@@ -19,10 +19,10 @@
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 producer.type=async
 
 # to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueueTimeout.ms=-1
+queue.enqueue.timeout.ms=-1
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/server_source1.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_source1.properties b/system_test/broker_failure/config/server_source1.properties
index 1a16c2c..bbf288e 100644
--- a/system_test/broker_failure/config/server_source1.properties
+++ b/system_test/broker_failure/config/server_source1.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=1
+broker.id=1
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source1-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.bytes=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/server_source2.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_source2.properties b/system_test/broker_failure/config/server_source2.properties
index 032bbcc..570bafc 100644
--- a/system_test/broker_failure/config/server_source2.properties
+++ b/system_test/broker_failure/config/server_source2.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=2
+broker.id=2
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source2-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.bytes=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/server_source3.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_source3.properties b/system_test/broker_failure/config/server_source3.properties
index 05b3a97..df8ff6a 100644
--- a/system_test/broker_failure/config/server_source3.properties
+++ b/system_test/broker_failure/config/server_source3.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=3
+broker.id=3
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source3-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.size=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/server_source4.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_source4.properties b/system_test/broker_failure/config/server_source4.properties
index c94204d..ee9c7fd 100644
--- a/system_test/broker_failure/config/server_source4.properties
+++ b/system_test/broker_failure/config/server_source4.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=4
+broker.id=4
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source4-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.bytes=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/server_target1.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_target1.properties b/system_test/broker_failure/config/server_target1.properties
index e9cc038..7f776bd 100644
--- a/system_test/broker_failure/config/server_target1.properties
+++ b/system_test/broker_failure/config/server_target1.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=1
+broker.id=1
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-target1-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.bytes=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,16 +63,16 @@ enable.zookeeper=true
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 # topic partition count map
 # topic.partition.count.map=topic1:3, topic2:4

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/server_target2.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_target2.properties b/system_test/broker_failure/config/server_target2.properties
index 6007fa8..6d997dc 100644
--- a/system_test/broker_failure/config/server_target2.properties
+++ b/system_test/broker_failure/config/server_target2.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=2
+broker.id=2
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-target2-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.bytes=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,16 +63,16 @@ enable.zookeeper=true
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 # topic partition count map
 # topic.partition.count.map=topic1:3, topic2:4

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/server_target3.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_target3.properties b/system_test/broker_failure/config/server_target3.properties
index 9ac0b06..0d3a9ae 100644
--- a/system_test/broker_failure/config/server_target3.properties
+++ b/system_test/broker_failure/config/server_target3.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=3
+broker.id=3
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-target3-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.bytes=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,16 +63,16 @@ enable.zookeeper=true
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 # topic partition count map
 # topic.partition.count.map=topic1:3, topic2:4

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/broker_failure/config/whitelisttest.consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/whitelisttest.consumer.properties b/system_test/broker_failure/config/whitelisttest.consumer.properties
index aaa3f7c..dd91bd3 100644
--- a/system_test/broker_failure/config/whitelisttest.consumer.properties
+++ b/system_test/broker_failure/config/whitelisttest.consumer.properties
@@ -20,10 +20,10 @@
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 #consumer group id
-groupid=group1
+group.id=group1
 
 mirror.topics.whitelist=test_1,test_2
-autooffset.reset=smallest
+auto.offset.reset=smallest

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/common/util.sh
----------------------------------------------------------------------
diff --git a/system_test/common/util.sh b/system_test/common/util.sh
index d3ee607..e3d10c6 100644
--- a/system_test/common/util.sh
+++ b/system_test/common/util.sh
@@ -72,7 +72,7 @@ kill_child_processes() {
 #    from the settings in config/server.properties while the brokerid and
 #    server port will be incremented accordingly
 # 3. to generate properties files with non-default values such as 
-#    "socket.send.buffer=2097152", simply add the property with new value
+#    "socket.send.buffer.bytes=2097152", simply add the property with new value
 #    to the array variable kafka_properties_to_replace as shown below
 # =========================================================================
 generate_kafka_properties_files() {
@@ -103,10 +103,10 @@ generate_kafka_properties_files() {
     # values. Other kafka properties can be added
     # in a similar fashion.
     # =============================================
-    # kafka_properties_to_replace[1]="socket.send.buffer=2097152"
-    # kafka_properties_to_replace[2]="socket.receive.buffer=2097152"
+    # kafka_properties_to_replace[1]="socket.send.buffer.bytes=2097152"
+    # kafka_properties_to_replace[2]="socket.receive.buffer.bytes=2097152"
     # kafka_properties_to_replace[3]="num.partitions=3"
-    # kafka_properties_to_replace[4]="max.socket.request.bytes=10485760"
+    # kafka_properties_to_replace[4]="socket.request.max.bytes=10485760"
 
     server_properties=`cat ${this_config_dir}/server.properties`
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/migration_tool_testsuite/config/migration_producer.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/config/migration_producer.properties b/system_test/migration_tool_testsuite/config/migration_producer.properties
index a923ee3..af080ae 100644
--- a/system_test/migration_tool_testsuite/config/migration_producer.properties
+++ b/system_test/migration_tool_testsuite/config/migration_producer.properties
@@ -26,10 +26,10 @@ broker.list=localhost:9094,localhost:9095,localhost:9096
 #zk.connect=
 
 # zookeeper session timeout; default is 6000
-#zk.sessiontimeout.ms=
+#zk.session.timeout.ms=
 
 # the max time that the client waits to establish a connection to zookeeper; default is 6000
-#zk.connectiontimeout.ms
+#zk.connection.timeout.ms
 
 # name of the partitioner class for partitioning events; default partition spreads data randomly
 #partitioner.class=
@@ -46,35 +46,18 @@ serializer.class=kafka.serializer.DefaultEncoder
 # allow topic level compression
 #compressed.topics=
 
-# max message size; messages larger than that size are discarded; default is 1000000
-#max.message.size=
-
-
 ############################# Async Producer #############################
 # maximum time, in milliseconds, for buffering data on the producer queue 
-#queue.time=
+#queue.buffering.max.ms=
 
 # the maximum size of the blocking queue for buffering on the producer 
-#queue.size=
+#queue.buffering.max.messages=
 
 # Timeout for event enqueue:
 # 0: events will be enqueued immediately or dropped if the queue is full
 # -ve: enqueue will block indefinitely if the queue is full
 # +ve: enqueue will block up to this many milliseconds if the queue is full
-#queue.enqueueTimeout.ms=
+#queue.enqueue.timeout.ms=
 
 # the number of messages batched at the producer 
-#batch.size=
-
-# the callback handler for one or multiple events 
-#callback.handler=
-
-# properties required to initialize the callback handler 
-#callback.handler.props=
-
-# the handler for events 
-#event.handler=
-
-# properties required to initialize the event handler 
-#event.handler.props=
-
+#batch.num.messages=
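
As a minimal sketch of the renamed async producer settings (the numeric values here are
illustrative, not defaults), a producer.properties overriding these keys might read:

    producer.type=async
    queue.buffering.max.ms=5000
    queue.buffering.max.messages=10000
    # -1 makes enqueue block indefinitely when the queue is full, per the semantics above
    queue.enqueue.timeout.ms=-1
    batch.num.messages=200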

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/migration_tool_testsuite/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/config/server.properties b/system_test/migration_tool_testsuite/config/server.properties
index 6976869..d231d4c 100644
--- a/system_test/migration_tool_testsuite/config/server.properties
+++ b/system_test/migration_tool_testsuite/config/server.properties
@@ -17,12 +17,12 @@
 ############################# Server Basics #############################
 
 # The id of the broker. This must be set to a unique integer for each broker.
-brokerid=0
+broker.id=0
 
 # Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
 # from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
 # may not be what you want.
-#hostname=
+#host.name=
 
 
 ############################# Socket Server Settings #############################
@@ -31,19 +31,19 @@ brokerid=0
 port=9091
 
 # The number of threads handling network requests
-network.threads=2
+num.network.threads=2
  
 # The number of threads doing disk I/O
-io.threads=2
+num.io.threads=2
 
 # The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # The maximum size of a request that the socket server will accept (protection against OOM)
-max.socket.request.bytes=104857600
+socket.request.max.bytes=104857600
 
 
 ############################# Log Basics #############################
@@ -70,16 +70,16 @@ num.partitions=5
 # every N messages (or both). This can be done globally and overridden on a per-topic basis.
 
 # The number of messages to accept before forcing a flush of data to disk
-log.flush.interval=10000
+log.flush.interval.messages=10000
 
 # The maximum amount of time a message can sit in a log before we force a flush
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
-# Per-topic overrides for log.default.flush.interval.ms
-#topic.flush.intervals.ms=topic1:1000, topic2:3000
+# Per-topic overrides for log.flush.interval.ms
+#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
 
 # The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 ############################# Log Retention Policy #############################
 
@@ -92,13 +92,13 @@ log.default.flush.scheduler.interval.ms=1000
 log.retention.hours=168
 
 # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.size.
-#log.retention.size=1073741824
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
 
 # The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.file.size=536870912
-#log.file.size=102400
-log.file.size=128
+#log.segment.bytes=536870912
+#log.segment.bytes=102400
+log.segment.bytes=128
 
 # The interval at which log segments are checked to see if they can be deleted according 
 # to the retention policies
@@ -117,6 +117,6 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # Timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 monitoring.period.secs=1
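
Putting the flush and retention policy together under the renamed keys, the effective
settings from this file are (the per-topic override line is the commented example above):

    log.flush.interval.messages=10000
    log.flush.interval.ms=1000
    log.flush.scheduler.interval.ms=1000
    #log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
    log.retention.hours=168
    #log.retention.bytes=1073741824
    log.segment.bytes=128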

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/blacklisttest.consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/blacklisttest.consumer.properties b/system_test/mirror_maker/config/blacklisttest.consumer.properties
index 6ea85ec..ff12015 100644
--- a/system_test/mirror_maker/config/blacklisttest.consumer.properties
+++ b/system_test/mirror_maker/config/blacklisttest.consumer.properties
@@ -20,9 +20,9 @@
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 #consumer group id
-groupid=group1
-shallowiterator.enable=true
+group.id=group1
+shallow.iterator.enable=true
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/mirror_producer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/mirror_producer.properties b/system_test/mirror_maker/config/mirror_producer.properties
index b74c631..aa8be65 100644
--- a/system_test/mirror_maker/config/mirror_producer.properties
+++ b/system_test/mirror_maker/config/mirror_producer.properties
@@ -19,12 +19,12 @@ zk.connect=localhost:2183
 # broker.list=1:localhost:9094,2:localhost:9095
 
 # timeout in ms for connecting to zookeeper
-# zk.connectiontimeout.ms=1000000
+# zk.connection.timeout.ms=1000000
 
 producer.type=async
 
 # to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueueTimeout.ms=-1
+queue.enqueue.timeout.ms=-1
 
 num.producers.per.broker=2
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/server_source_1_1.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/server_source_1_1.properties b/system_test/mirror_maker/config/server_source_1_1.properties
index d89c4fb..2f070a7 100644
--- a/system_test/mirror_maker/config/server_source_1_1.properties
+++ b/system_test/mirror_maker/config/server_source_1_1.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=1
+broker.id=1
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source-1-1-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=10000000
+log.segment.bytes=10000000
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/server_source_1_2.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/server_source_1_2.properties b/system_test/mirror_maker/config/server_source_1_2.properties
index 063d68b..f9353e8 100644
--- a/system_test/mirror_maker/config/server_source_1_2.properties
+++ b/system_test/mirror_maker/config/server_source_1_2.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=2
+broker.id=2
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source-1-2-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=536870912
+log.segment.bytes=536870912
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/server_source_2_1.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/server_source_2_1.properties b/system_test/mirror_maker/config/server_source_2_1.properties
index 998b460..daa01ad 100644
--- a/system_test/mirror_maker/config/server_source_2_1.properties
+++ b/system_test/mirror_maker/config/server_source_2_1.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=1
+broker.id=1
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source-2-1-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=536870912
+log.segment.bytes=536870912
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/server_source_2_2.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/server_source_2_2.properties b/system_test/mirror_maker/config/server_source_2_2.properties
index 81427ae..be6fdfc 100644
--- a/system_test/mirror_maker/config/server_source_2_2.properties
+++ b/system_test/mirror_maker/config/server_source_2_2.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=2
+broker.id=2
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-source-2-2-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=536870912
+log.segment.bytes=536870912
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,14 +63,14 @@ enable.zookeeper=true
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/server_target_1_1.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/server_target_1_1.properties b/system_test/mirror_maker/config/server_target_1_1.properties
index 0265f4e..d37955a 100644
--- a/system_test/mirror_maker/config/server_target_1_1.properties
+++ b/system_test/mirror_maker/config/server_target_1_1.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=1
+broker.id=1
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-target-1-1-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=536870912
+log.segment.bytes=536870912
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,16 +63,16 @@ enable.zookeeper=true
 zk.connect=localhost:2183
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 # topic partition count map
 # topic.partition.count.map=topic1:3, topic2:4

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/server_target_1_2.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/server_target_1_2.properties b/system_test/mirror_maker/config/server_target_1_2.properties
index a31e9ca..aa7546c 100644
--- a/system_test/mirror_maker/config/server_target_1_2.properties
+++ b/system_test/mirror_maker/config/server_target_1_2.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=2
+broker.id=2
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-target-1-2-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=536870912
+log.segment.bytes=536870912
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,16 +63,16 @@ enable.zookeeper=true
 zk.connect=localhost:2183
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 # topic partition count map
 # topic.partition.count.map=topic1:3, topic2:4

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/whitelisttest_1.consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/whitelisttest_1.consumer.properties b/system_test/mirror_maker/config/whitelisttest_1.consumer.properties
index 6ea85ec..ff12015 100644
--- a/system_test/mirror_maker/config/whitelisttest_1.consumer.properties
+++ b/system_test/mirror_maker/config/whitelisttest_1.consumer.properties
@@ -20,9 +20,9 @@
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 #consumer group id
-groupid=group1
-shallowiterator.enable=true
+group.id=group1
+shallow.iterator.enable=true
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker/config/whitelisttest_2.consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker/config/whitelisttest_2.consumer.properties b/system_test/mirror_maker/config/whitelisttest_2.consumer.properties
index e11112f..f1a902b 100644
--- a/system_test/mirror_maker/config/whitelisttest_2.consumer.properties
+++ b/system_test/mirror_maker/config/whitelisttest_2.consumer.properties
@@ -20,9 +20,9 @@
 zk.connect=localhost:2182
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 #consumer group id
-groupid=group1
-shallowiterator.enable=true
+group.id=group1
+shallow.iterator.enable=true
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker_testsuite/config/mirror_consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/mirror_consumer.properties b/system_test/mirror_maker_testsuite/config/mirror_consumer.properties
index 6517976..ea415e6 100644
--- a/system_test/mirror_maker_testsuite/config/mirror_consumer.properties
+++ b/system_test/mirror_maker_testsuite/config/mirror_consumer.properties
@@ -1,12 +1,12 @@
 zk.connect=localhost:2108
-zk.connectiontimeout.ms=1000000
-groupid=mm_regtest_grp
-autocommit.interval.ms=120000
-autooffset.reset=smallest
-#fetch.size=1048576
-#rebalance.retries.max=4
+zk.connection.timeout.ms=1000000
+group.id=mm_regtest_grp
+auto.commit.interval.ms=120000
+auto.offset.reset=smallest
+#fetch.message.max.bytes=1048576
+#rebalance.max.retries=4
 #rebalance.backoff.ms=2000
-socket.buffersize=1048576
-fetch.size=1048576
-zk.synctime.ms=15000
-shallowiterator.enable=true
+socket.receive.buffer.bytes=1048576
+fetch.message.max.bytes=1048576
+zk.sync.time.ms=15000
+shallow.iterator.enable=true
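
For quick reference, the consumer-side renames applied in this file are (old key -> new key):

    zk.connectiontimeout.ms  -> zk.connection.timeout.ms
    groupid                  -> group.id
    autocommit.interval.ms   -> auto.commit.interval.ms
    autooffset.reset         -> auto.offset.reset
    fetch.size               -> fetch.message.max.bytes
    rebalance.retries.max    -> rebalance.max.retries
    socket.buffersize        -> socket.receive.buffer.bytes
    zk.synctime.ms           -> zk.sync.time.ms
    shallowiterator.enable   -> shallow.iterator.enable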

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker_testsuite/config/mirror_producer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/mirror_producer.properties b/system_test/mirror_maker_testsuite/config/mirror_producer.properties
index 3bb5a7b..7db5bfc 100644
--- a/system_test/mirror_maker_testsuite/config/mirror_producer.properties
+++ b/system_test/mirror_maker_testsuite/config/mirror_producer.properties
@@ -1,5 +1,5 @@
 producer.type=async
-queue.enqueueTimeout.ms=-1
+queue.enqueue.timeout.ms=-1
 broker.list=localhost:9094
 compression.codec=0
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/mirror_maker_testsuite/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/server.properties b/system_test/mirror_maker_testsuite/config/server.properties
index 8ef65ba..dacf158 100644
--- a/system_test/mirror_maker_testsuite/config/server.properties
+++ b/system_test/mirror_maker_testsuite/config/server.properties
@@ -17,12 +17,12 @@
 ############################# Server Basics #############################
 
 # The id of the broker. This must be set to a unique integer for each broker.
-brokerid=0
+broker.id=0
 
 # Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
 # from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
 # may not be what you want.
-#hostname=
+#host.name=
 
 
 ############################# Socket Server Settings #############################
@@ -31,19 +31,19 @@ brokerid=0
 port=9091
 
 # The number of threads handling network requests
-network.threads=2
+num.network.threads=2
  
 # The number of threads doing disk I/O
-io.threads=2
+num.io.threads=2
 
 # The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # The maximum size of a request that the socket server will accept (protection against OOM)
-max.socket.request.bytes=104857600
+socket.request.max.bytes=104857600
 
 
 ############################# Log Basics #############################
@@ -70,16 +70,16 @@ num.partitions=5
 # every N messages (or both). This can be done globally and overridden on a per-topic basis.
 
 # The number of messages to accept before forcing a flush of data to disk
-log.flush.interval=10000
+log.flush.interval.messages=10000
 
 # The maximum amount of time a message can sit in a log before we force a flush
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
-# Per-topic overrides for log.default.flush.interval.ms
-#topic.flush.intervals.ms=topic1:1000, topic2:3000
+# Per-topic overrides for log.flush.interval.ms
+#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
 
 # The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 ############################# Log Retention Policy #############################
 
@@ -92,13 +92,13 @@ log.default.flush.scheduler.interval.ms=1000
 log.retention.hours=168
 
 # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.size.
-#log.retention.size=1073741824
-log.retention.size=-1
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+log.retention.bytes=-1
 
 # The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.file.size=536870912
-log.file.size=102400
+#log.segment.bytes=536870912
+log.segment.bytes=102400
 
 # The interval at which log segments are checked to see if they can be deleted according 
 # to the retention policies
@@ -117,23 +117,23 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # Timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 monitoring.period.secs=1
-max.message.size=1000000
-max.queued.requests=500
+message.max.bytes=1000000
+queued.max.requests=500
 log.roll.hours=168
-log.index.max.size=10485760
+log.index.size.max.bytes=10485760
 log.index.interval.bytes=4096
-auto.create.topics=true
+auto.create.topics.enable=true
 controller.socket.timeout.ms=30000
 controller.message.queue.size=10
 default.replication.factor=1
-replica.max.lag.time.ms=10000
-replica.max.lag.bytes=4000
+replica.lag.time.max.ms=10000
+replica.lag.max.messages=4000
 replica.socket.timeout.ms=30000
-replica.socket.buffersize=65536
-replica.fetch.size=1048576
-replica.fetch.wait.time.ms=500
+replica.socket.receive.buffer.bytes=65536
+replica.fetch.max.bytes=1048576
+replica.fetch.wait.max.ms=500
 replica.fetch.min.bytes=4096
-replica.fetchers=1
+num.replica.fetchers=1

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/producer_perf/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/producer_perf/config/server.properties b/system_test/producer_perf/config/server.properties
index abd0765..9f8a633 100644
--- a/system_test/producer_perf/config/server.properties
+++ b/system_test/producer_perf/config/server.properties
@@ -15,12 +15,12 @@
 # see kafka.server.KafkaConfig for additional details and defaults
 
 # the id of the broker
-brokerid=0
+broker.id=0
 
 # hostname of broker. If not set, will pick up from the value returned
 # from getLocalHost.  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+# host.name=
 
 # number of logical partitions on this broker
 num.partitions=1
@@ -35,13 +35,13 @@ num.threads=8
 log.dir=/tmp/kafka-logs
 
 # the send buffer used by the socket server 
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # the receive buffer used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # the maximum size of a log segment
-log.file.size=536870912
+log.segment.bytes=536870912
 
 # the interval between running cleanup on the logs
 log.cleanup.interval.mins=1
@@ -50,7 +50,7 @@ log.cleanup.interval.mins=1
 log.retention.hours=168
 
 #the number of messages to accept without flushing the log to disk
-log.flush.interval=600
+log.flush.interval.messages=600
 
 #set the following properties to use zookeeper
 
@@ -63,16 +63,16 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 # time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+#log.flush.intervals.ms.per.topic=topic:1000
 
 # default time based flush interval in ms
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
 # time based topic flasher time rate in ms
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 # topic partition count map
 # topic.partition.count.map=topic1:3, topic2:4

http://git-wip-us.apache.org/repos/asf/kafka/blob/a4095319/system_test/replication_testsuite/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/server.properties b/system_test/replication_testsuite/config/server.properties
index 8ef65ba..dacf158 100644
--- a/system_test/replication_testsuite/config/server.properties
+++ b/system_test/replication_testsuite/config/server.properties
@@ -17,12 +17,12 @@
 ############################# Server Basics #############################
 
 # The id of the broker. This must be set to a unique integer for each broker.
-brokerid=0
+broker.id=0
 
 # Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
 # from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
 # may not be what you want.
-#hostname=
+#host.name=
 
 
 ############################# Socket Server Settings #############################
@@ -31,19 +31,19 @@ brokerid=0
 port=9091
 
 # The number of threads handling network requests
-network.threads=2
+num.network.threads=2
  
 # The number of threads doing disk I/O
-io.threads=2
+num.io.threads=2
 
 # The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer=1048576
+socket.send.buffer.bytes=1048576
 
 # The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer=1048576
+socket.receive.buffer.bytes=1048576
 
 # The maximum size of a request that the socket server will accept (protection against OOM)
-max.socket.request.bytes=104857600
+socket.request.max.bytes=104857600
 
 
 ############################# Log Basics #############################
@@ -70,16 +70,16 @@ num.partitions=5
 # every N messages (or both). This can be done globally and overridden on a per-topic basis.
 
 # The number of messages to accept before forcing a flush of data to disk
-log.flush.interval=10000
+log.flush.interval.messages=10000
 
 # The maximum amount of time a message can sit in a log before we force a flush
-log.default.flush.interval.ms=1000
+log.flush.interval.ms=1000
 
-# Per-topic overrides for log.default.flush.interval.ms
-#topic.flush.intervals.ms=topic1:1000, topic2:3000
+# Per-topic overrides for log.flush.interval.ms
+#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
 
 # The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.default.flush.scheduler.interval.ms=1000
+log.flush.scheduler.interval.ms=1000
 
 ############################# Log Retention Policy #############################
 
@@ -92,13 +92,13 @@ log.default.flush.scheduler.interval.ms=1000
 log.retention.hours=168
 
 # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.size.
-#log.retention.size=1073741824
-log.retention.size=-1
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+log.retention.bytes=-1
 
 # The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.file.size=536870912
-log.file.size=102400
+#log.segment.bytes=536870912
+log.segment.bytes=102400
 
 # The interval at which log segments are checked to see if they can be deleted according 
 # to the retention policies
@@ -117,23 +117,23 @@ enable.zookeeper=true
 zk.connect=localhost:2181
 
 # Timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+zk.connection.timeout.ms=1000000
 
 monitoring.period.secs=1
-max.message.size=1000000
-max.queued.requests=500
+message.max.bytes=1000000
+queued.max.requests=500
 log.roll.hours=168
-log.index.max.size=10485760
+log.index.size.max.bytes=10485760
 log.index.interval.bytes=4096
-auto.create.topics=true
+auto.create.topics.enable=true
 controller.socket.timeout.ms=30000
 controller.message.queue.size=10
 default.replication.factor=1
-replica.max.lag.time.ms=10000
-replica.max.lag.bytes=4000
+replica.lag.time.max.ms=10000
+replica.lag.max.messages=4000
 replica.socket.timeout.ms=30000
-replica.socket.buffersize=65536
-replica.fetch.size=1048576
-replica.fetch.wait.time.ms=500
+replica.socket.receive.buffer.bytes=65536
+replica.fetch.max.bytes=1048576
+replica.fetch.wait.max.ms=500
 replica.fetch.min.bytes=4096
-replica.fetchers=1
+num.replica.fetchers=1