Posted to commits@samza.apache.org by ca...@apache.org on 2020/06/22 22:53:01 UTC

[samza] branch master updated: SAMZA-2551: Upgrade all modules to automatically use checkstyle 6.11.2 (part 1: includes samza-core) (#1389)

This is an automated email from the ASF dual-hosted git repository.

cameronlee pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/samza.git


The following commit(s) were added to refs/heads/master by this push:
     new 5f05cdd  SAMZA-2551: Upgrade all modules to automatically use checkstyle 6.11.2 (part 1: includes samza-core) (#1389)
5f05cdd is described below

commit 5f05cdde2eea46a8cbda08dd8c9868c8f46a4e9d
Author: Cameron Lee <ca...@linkedin.com>
AuthorDate: Mon Jun 22 15:52:51 2020 -0700

    SAMZA-2551: Upgrade all modules to automatically use checkstyle 6.11.2 (part 1: includes samza-core) (#1389)
    
    API/Upgrade/Usage changes: N/A
---
 build.gradle                                       |   3 +-
 .../classloader/IsolatingClassLoaderFactory.java   |   2 +-
 .../clustermanager/ClusterBasedJobCoordinator.java |  50 +-
 .../clustermanager/ContainerProcessManager.java    |   4 +-
 .../samza/clustermanager/ResourceRequestState.java |   8 +-
 .../clustermanager/StandbyContainerManager.java    |  26 +-
 .../java/org/apache/samza/config/JobConfig.java    |  30 +-
 .../java/org/apache/samza/config/StreamConfig.java |   6 +-
 .../java/org/apache/samza/config/SystemConfig.java |   4 +-
 .../samza/container/ContainerHeartbeatMonitor.java |  22 +-
 .../apache/samza/container/LocalityManager.java    |  14 +-
 .../java/org/apache/samza/container/RunLoop.java   |   4 +-
 .../stream/AllSspToSingleTaskGrouperFactory.java   |   8 +-
 .../container/grouper/stream/SSPGrouperProxy.java  |  18 +-
 .../grouper/task/GroupByContainerIds.java          |  10 +-
 .../grouper/task/TaskAssignmentManager.java        |  24 +-
 .../NamespaceAwareCoordinatorStreamStore.java      |  16 +-
 .../java/org/apache/samza/execution/JobGraph.java  | 104 ++---
 .../samza/execution/JobGraphJsonGenerator.java     |  20 +-
 .../java/org/apache/samza/execution/JobNode.java   |  12 +-
 .../execution/JobNodeConfigurationGenerator.java   |  96 ++--
 .../org/apache/samza/execution/StreamEdge.java     |   4 +-
 .../org/apache/samza/execution/StreamManager.java  |   6 +-
 .../samza/operators/impl/ControlMessageSender.java |  12 +-
 .../samza/operators/impl/EndOfStreamStates.java    |   4 +-
 .../apache/samza/operators/impl/OperatorImpl.java  |  62 +--
 .../samza/operators/impl/OperatorImplGraph.java    |  42 +-
 .../samza/operators/impl/WatermarkMetrics.java     |   4 +-
 .../samza/operators/impl/WatermarkStates.java      |  12 +-
 .../triggers/TimeSinceFirstMessageTriggerImpl.java |   6 +-
 .../samza/operators/triggers/TimeTriggerImpl.java  |   6 +-
 .../apache/samza/runtime/ContainerLaunchUtil.java  |  16 +-
 .../samza/runtime/LocalApplicationRunner.java      |  36 +-
 .../samza/runtime/RemoteApplicationRunner.java     |   8 +-
 .../apache/samza/scheduler/EpochTimeScheduler.java |  12 +-
 .../samza/storage/ChangelogStreamManager.java      |  86 ++--
 .../NonTransactionalStateTaskRestoreManager.java   |  96 ++--
 .../org/apache/samza/storage/StorageRecovery.java  |  32 +-
 .../apache/samza/storage/TaskSideInputHandler.java |  58 +--
 .../samza/storage/TaskSideInputStorageManager.java |  80 ++--
 .../TransactionalStateTaskRestoreManager.java      | 512 ++++++++++-----------
 .../samza/system/inmemory/InMemoryManager.java     |  67 ++-
 .../samza/system/inmemory/InMemorySystemAdmin.java |   6 +-
 .../system/inmemory/InMemorySystemConsumer.java    |   6 +-
 .../apache/samza/table/TableConfigGenerator.java   |  30 +-
 .../java/org/apache/samza/table/TableManager.java  |   6 +-
 .../samza/table/batching/BatchProcessor.java       |  14 +-
 .../samza/table/batching/TableBatchHandler.java    |  28 +-
 .../apache/samza/table/caching/CachingTable.java   | 146 +++---
 .../samza/table/caching/guava/GuavaCacheTable.java |  12 +-
 .../table/ratelimit/AsyncRateLimitedTable.java     |  32 +-
 .../org/apache/samza/table/remote/RemoteTable.java |  66 +--
 .../samza/table/remote/RemoteTableProvider.java    |  40 +-
 .../apache/samza/table/retry/FailsafeAdapter.java  |  18 +-
 .../org/apache/samza/task/StreamOperatorTask.java  |  12 +-
 .../samza/util/EmbeddedTaggedRateLimiter.java      |  50 +-
 .../java/org/apache/samza/util/ReflectionUtil.java |   8 +-
 .../src/main/java/org/apache/samza/util/Util.java  |   6 +-
 .../samza/zk/ZkBarrierForVersionUpgrade.java       |  22 +-
 .../java/org/apache/samza/zk/ZkJobCoordinator.java |  80 ++--
 .../samza/storage/ContainerStorageManager.java     | 238 +++++-----
 .../TestStreamApplicationDescriptorImpl.java       | 120 ++---
 .../TestContainerAllocatorWithHostAffinity.java    |   8 +-
 .../TestContainerAllocatorWithoutHostAffinity.java |   8 +-
 .../TestContainerPlacementActions.java             | 130 +++---
 .../TestContainerProcessManager.java               |   4 +-
 .../samza/clustermanager/TestStandbyAllocator.java |  16 +-
 .../org/apache/samza/config/TestStreamConfig.java  |  66 +--
 .../container/TestContainerHeartbeatMonitor.java   |  20 +-
 .../org/apache/samza/container/TestRunLoop.java    | 247 +++++-----
 .../samza/diagnostics/TestDiagnosticsManager.java  |  10 +-
 .../samza/execution/TestExecutionPlanner.java      | 340 +++++++-------
 .../samza/execution/TestJobGraphJsonGenerator.java | 113 +++--
 .../TestJobNodeConfigurationGenerator.java         |   4 +-
 .../apache/samza/operators/TestJoinOperator.java   |  46 +-
 .../operators/impl/TestControlMessageSender.java   |  20 +-
 .../operators/impl/TestOperatorImplGraph.java      | 170 +++----
 .../samza/operators/impl/TestWindowOperator.java   |  46 +-
 .../impl/store/TestTimeSeriesStoreImpl.java        |   8 +-
 .../samza/operators/spec/TestOperatorSpec.java     |   4 +-
 .../spec/TestPartitionByOperatorSpec.java          |  42 +-
 .../samza/processor/TestStreamProcessor.java       | 169 ++++---
 ...TestClusterBasedProcessorLifecycleListener.java |   8 +-
 .../samza/runtime/TestLocalApplicationRunner.java  | 105 ++---
 .../samza/scheduler/TestEpochTimeScheduler.java    |  84 ++--
 .../samza/storage/TestTaskSideInputHandler.java    |  61 ++-
 .../storage/TestTaskSideInputStorageManager.java   |   6 +-
 .../TestTransactionalStateTaskRestoreManager.java  | 208 ++++-----
 .../org/apache/samza/system/MockSystemFactory.java |  46 +-
 .../apache/samza/system/TestSSPMetadataCache.java  |  12 +-
 .../samza/table/caching/TestCachingTable.java      |  50 +-
 .../apache/samza/table/remote/TestRemoteTable.java |  14 +-
 .../samza/table/retry/TestAsyncRetriableTable.java |  36 +-
 .../apache/samza/task/TestStreamOperatorTask.java  |   6 +-
 .../apache/samza/util/TestSplitDeploymentUtil.java |  10 +-
 .../samza/zk/TestScheduleAfterDebounceTime.java    |  19 +-
 .../test/java/org/apache/samza/zk/TestZkUtils.java |  14 +-
 .../samza/storage/TestContainerStorageManager.java |  38 +-
 98 files changed, 2371 insertions(+), 2399 deletions(-)

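Aside from the build.gradle change below, every hunk in this patch applies the same mechanical fix: the body of a block lambda is re-indented to one step (+2 spaces) from the start of the enclosing statement, with the closing "});" returned to the statement's own indent, instead of hanging under the lambda arrow as the older checkstyle configuration allowed. A minimal, runnable sketch of the pattern (the IndentationExample class and its items list are illustrative only, not part of the patch):

    import java.util.Arrays;
    import java.util.List;

    public class IndentationExample {
      public static void main(String[] args) {
        List<String> items = Arrays.asList("a", "b");
        // Old style, rejected after this upgrade: the lambda body and its
        // closing brace are indented relative to the lambda expression itself.
        //   items.forEach(item -> {
        //       System.out.println(item);
        //     });
        // New style applied throughout this patch: the body sits at one
        // continuation indent from the statement that contains the lambda.
        items.forEach(item -> {
          System.out.println(item);
        });
      }
    }
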
diff --git a/build.gradle b/build.gradle
index dfbe2e6..05f4066 100644
--- a/build.gradle
+++ b/build.gradle
@@ -209,7 +209,8 @@ project(":samza-core_$scalaSuffix") {
 
   checkstyle {
     configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
+    // temporarily hardcode 6.11.2 until all other modules are upgraded
+    toolVersion = "6.11.2"
   }
 
   test {
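(The removed line read toolVersion = "$checkstyleVersion", the shared version property used by the rest of the build. Per the subject line, the hardcoded 6.11.2 is a stopgap for samza-core only; once the remaining modules are upgraded in the later parts of SAMZA-2551, the shared property can presumably be bumped and this override dropped.)
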
diff --git a/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java b/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java
index 19e776e..344a034 100644
--- a/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java
+++ b/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java
@@ -244,7 +244,7 @@ public class IsolatingClassLoaderFactory {
     apiParentRelationshipBuilder.addDelegatePreferredClassPredicate(new BootstrapClassPredicate());
     // the classes which are Samza framework API classes are added here
     getFrameworkApiClassGlobs(apiLibDirectory).forEach(
-        apiClassName -> apiParentRelationshipBuilder.addDelegatePreferredClassPredicate(new GlobMatcher(apiClassName)));
+      apiClassName -> apiParentRelationshipBuilder.addDelegatePreferredClassPredicate(new GlobMatcher(apiClassName)));
     return apiParentRelationshipBuilder.build();
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java
index ed99ad4..8482a3b 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java
@@ -382,13 +382,13 @@ public class ClusterBasedJobCoordinator {
     return Optional.of(new StreamPartitionCountMonitor(inputStreamsToMonitor, streamMetadata, metrics,
         new JobConfig(config).getMonitorPartitionChangeFrequency(), streamsChanged -> {
       // Fail the jobs with durable state store. Otherwise, application state.status remains UNDEFINED s.t. YARN job will be restarted
-        if (hasDurableStores) {
-          LOG.error("Input topic partition count changed in a job with durable state. Failing the job. " +
-              "Changed topics: {}", streamsChanged.toString());
-          state.status = SamzaApplicationState.SamzaAppStatus.FAILED;
-        }
-        coordinatorException = new PartitionChangeException("Input topic partition count changes detected for topics: " + streamsChanged.toString());
-      }));
+      if (hasDurableStores) {
+        LOG.error("Input topic partition count changed in a job with durable state. Failing the job. " +
+            "Changed topics: {}", streamsChanged.toString());
+        state.status = SamzaApplicationState.SamzaAppStatus.FAILED;
+      }
+      coordinatorException = new PartitionChangeException("Input topic partition count changes detected for topics: " + streamsChanged.toString());
+    }));
   }
 
   private Optional<StreamRegexMonitor> getInputRegexMonitor(Config config, SystemAdmins systemAdmins, Set<SystemStream> inputStreamsToMonitor) {
@@ -465,9 +465,9 @@ public class ClusterBasedJobCoordinator {
    */
   public static void main(String[] args) {
     Thread.setDefaultUncaughtExceptionHandler((thread, exception) -> {
-        LOG.error("Uncaught exception in ClusterBasedJobCoordinator::main. Exiting job coordinator", exception);
-        System.exit(1);
-      });
+      LOG.error("Uncaught exception in ClusterBasedJobCoordinator::main. Exiting job coordinator", exception);
+      System.exit(1);
+    });
     if (!SplitDeploymentUtil.isSplitDeploymentEnabled()) {
       // no isolation enabled, so can just execute runClusterBasedJobCoordinator directly
       runClusterBasedJobCoordinator(args);
@@ -579,21 +579,21 @@ public class ClusterBasedJobCoordinator {
     List<String> args = new ArrayList<>(config.size() * 2);
 
     config.forEach((key, value) -> {
-        if (key.equals(ApplicationConfig.APP_MAIN_ARGS)) {
-          /*
-           * Converts native beam pipeline options such as
-           * --runner=SamzaRunner --maxSourceParallelism=1024
-           */
-          args.addAll(Arrays.asList(value.split("\\s")));
-        } else {
-          /*
-           * Converts native Samza configs to config override format such as
-           * --config job.name=test
-           */
-          args.add("--config");
-          args.add(String.format("%s=%s", key, value));
-        }
-      });
+      if (key.equals(ApplicationConfig.APP_MAIN_ARGS)) {
+        /*
+         * Converts native beam pipeline options such as
+         * --runner=SamzaRunner --maxSourceParallelism=1024
+         */
+        args.addAll(Arrays.asList(value.split("\\s")));
+      } else {
+        /*
+         * Converts native Samza configs to config override format such as
+         * --config job.name=test
+         */
+        args.add("--config");
+        args.add(String.format("%s=%s", key, value));
+      }
+    });
 
     return args.toArray(new String[0]);
   }
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java
index c54918d..f6e3b1f 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java
@@ -187,8 +187,8 @@ public class ContainerProcessManager implements ClusterResourceManager.Callback
     this.containerManager = containerManager;
     this.diagnosticsManager = Option.empty();
     this.containerAllocator = allocator.orElseGet(
-        () -> new ContainerAllocator(this.clusterResourceManager, clusterManagerConfig, state,
-            hostAffinityEnabled, this.containerManager));
+      () -> new ContainerAllocator(this.clusterResourceManager, clusterManagerConfig, state,
+          hostAffinityEnabled, this.containerManager));
     this.allocatorThread = new Thread(this.containerAllocator, "Container Allocator Thread");
     LOG.info("Finished container process manager initialization");
   }
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java
index 2e4bcdb..69646b7 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java
@@ -258,10 +258,10 @@ public class ResourceRequestState {
 
     synchronized (lock) {
       allocatedResources.values().forEach(resources -> {
-          if (resources != null) {
-            resources.removeIf(r -> containerId.equals(r.getContainerId()));
-          }
-        });
+        if (resources != null) {
+          resources.removeIf(r -> containerId.equals(r.getContainerId()));
+        }
+      });
     }
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java b/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java
index 10d5f09..30d0de9 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java
@@ -143,8 +143,8 @@ public class StandbyContainerManager {
 
       // if we find a metadata entry, we continue with the failover (select another standby or any-host appropriately)
       failoverMetadata.ifPresent(
-          metadata -> initiateStandbyAwareAllocation(metadata.activeContainerID, metadata.activeContainerResourceID,
-              containerAllocator));
+        metadata -> initiateStandbyAwareAllocation(metadata.activeContainerID, metadata.activeContainerResourceID,
+            containerAllocator));
     } else {
       // If this class receives a callback for stop-fail on an active container, throw an exception
       throw new SamzaException("Invalid State. Received stop container fail for container Id: " + containerID);
@@ -216,10 +216,10 @@ public class StandbyContainerManager {
 
       Map<String, SamzaResource> runningStandbyContainersOnHost = new HashMap<>();
       this.samzaApplicationState.runningProcessors.forEach((samzaContainerId, samzaResource) -> {
-          if (standbySamzaContainerIds.contains(samzaContainerId) && samzaResource.getHost().equals(standbyHost)) {
-            runningStandbyContainersOnHost.put(samzaContainerId, samzaResource);
-          }
-        });
+        if (standbySamzaContainerIds.contains(samzaContainerId) && samzaResource.getHost().equals(standbyHost)) {
+          runningStandbyContainersOnHost.put(samzaContainerId, samzaResource);
+        }
+      });
 
       if (runningStandbyContainersOnHost.isEmpty()) {
         // if there are no running standby-containers on the standbyHost, we proceed to directly make a resource request
@@ -239,13 +239,13 @@ public class StandbyContainerManager {
         FailoverMetadata failoverMetadata = this.registerActiveContainerFailure(activeContainerID, resourceID);
 
         runningStandbyContainersOnHost.forEach((standbyContainerID, standbyResource) -> {
-            log.info("Initiating failover and stopping standby container, found standbyContainer {} = resource {}, "
-                    + "for active container {}", runningStandbyContainersOnHost.keySet(),
-                runningStandbyContainersOnHost.values(), activeContainerID);
-            failoverMetadata.updateStandbyContainer(standbyResource.getContainerId(), standbyResource.getHost());
-            samzaApplicationState.failoversToStandby.incrementAndGet();
-            this.clusterResourceManager.stopStreamProcessor(standbyResource);
-          });
+          log.info("Initiating failover and stopping standby container, found standbyContainer {} = resource {}, "
+                  + "for active container {}", runningStandbyContainersOnHost.keySet(),
+              runningStandbyContainersOnHost.values(), activeContainerID);
+          failoverMetadata.updateStandbyContainer(standbyResource.getContainerId(), standbyResource.getHost());
+          samzaApplicationState.failoversToStandby.incrementAndGet();
+          this.clusterResourceManager.stopStreamProcessor(standbyResource);
+        });
 
         // if multiple standbys are on the same host, we are in an invalid state, so we fail the deploy and retry
         if (runningStandbyContainersOnHost.size() > 1) {
diff --git a/samza-core/src/main/java/org/apache/samza/config/JobConfig.java b/samza-core/src/main/java/org/apache/samza/config/JobConfig.java
index dc03644..0b274e9 100644
--- a/samza-core/src/main/java/org/apache/samza/config/JobConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/JobConfig.java
@@ -220,20 +220,20 @@ public class JobConfig extends MapConfig {
   public Map<String, Pattern> getMonitorRegexPatternMap(String rewritersList) {
     Map<String, Pattern> inputRegexesToMonitor = new HashMap<>();
     Stream.of(rewritersList.split(",")).forEach(rewriterName -> {
-        Optional<String> rewriterSystem = getRegexResolvedSystem(rewriterName);
-        Optional<String> rewriterRegex = getRegexResolvedStreams(rewriterName);
-        if (rewriterSystem.isPresent() && rewriterRegex.isPresent()) {
-          Pattern newPatternForSystem;
-          Pattern existingPatternForSystem = inputRegexesToMonitor.get(rewriterSystem.get());
-          if (existingPatternForSystem == null) {
-            newPatternForSystem = Pattern.compile(rewriterRegex.get());
-          } else {
-            newPatternForSystem =
-                Pattern.compile(String.join("|", existingPatternForSystem.pattern(), rewriterRegex.get()));
-          }
-          inputRegexesToMonitor.put(rewriterSystem.get(), newPatternForSystem);
+      Optional<String> rewriterSystem = getRegexResolvedSystem(rewriterName);
+      Optional<String> rewriterRegex = getRegexResolvedStreams(rewriterName);
+      if (rewriterSystem.isPresent() && rewriterRegex.isPresent()) {
+        Pattern newPatternForSystem;
+        Pattern existingPatternForSystem = inputRegexesToMonitor.get(rewriterSystem.get());
+        if (existingPatternForSystem == null) {
+          newPatternForSystem = Pattern.compile(rewriterRegex.get());
+        } else {
+          newPatternForSystem =
+              Pattern.compile(String.join("|", existingPatternForSystem.pattern(), rewriterRegex.get()));
         }
-      });
+        inputRegexesToMonitor.put(rewriterSystem.get(), newPatternForSystem);
+      }
+    });
     return inputRegexesToMonitor;
   }
 
@@ -293,13 +293,13 @@ public class JobConfig extends MapConfig {
   public String getSSPMatcherConfigRegex() {
     return Optional.ofNullable(get(SSP_MATCHER_CONFIG_REGEX))
         .orElseThrow(
-            () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_REGEX)));
+          () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_REGEX)));
   }
 
   public String getSSPMatcherConfigRanges() {
     return Optional.ofNullable(get(SSP_MATCHER_CONFIG_RANGES))
         .orElseThrow(
-            () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_RANGES)));
+          () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_RANGES)));
   }
 
   public String getSSPMatcherConfigJobFactoryRegex() {
diff --git a/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java b/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java
index 8ee044e..950cf10 100644
--- a/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java
@@ -282,9 +282,9 @@ public class StreamConfig extends MapConfig {
     Set<SystemStream> legacySystemStreams = subConf.keySet().stream()
       .filter(k -> k.endsWith(MSG_SERDE) || k.endsWith(KEY_SERDE))
       .map(k -> {
-          String streamName = k.substring(0, k.length() - 16 /* .samza.XXX.serde length */);
-          return new SystemStream(systemName, streamName);
-        })
+        String streamName = k.substring(0, k.length() - 16 /* .samza.XXX.serde length */);
+        return new SystemStream(systemName, streamName);
+      })
       .collect(Collectors.toSet());
 
     Set<SystemStream> systemStreams = subset(STREAMS_PREFIX).keySet().stream()
diff --git a/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java b/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java
index 7b44a3a..a2da35d 100644
--- a/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java
@@ -92,8 +92,8 @@ public class SystemConfig extends MapConfig {
     return getSystemFactories().entrySet()
         .stream()
         .collect(Collectors.toMap(Entry::getKey,
-            systemNameToFactoryEntry -> systemNameToFactoryEntry.getValue()
-                .getAdmin(systemNameToFactoryEntry.getKey(), this)));
+          systemNameToFactoryEntry -> systemNameToFactoryEntry.getValue()
+              .getAdmin(systemNameToFactoryEntry.getKey(), this)));
   }
 
   /**
diff --git a/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java b/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java
index 1a131c3..89b5fc9 100644
--- a/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java
+++ b/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java
@@ -63,17 +63,17 @@ public class ContainerHeartbeatMonitor {
     }
     LOG.info("Starting ContainerHeartbeatMonitor");
     scheduler.scheduleAtFixedRate(() -> {
-        ContainerHeartbeatResponse response = containerHeartbeatClient.requestHeartbeat();
-        if (!response.isAlive()) {
-          scheduler.schedule(() -> {
-              // On timeout of container shutting down, force exit.
-              LOG.error("Graceful shutdown timeout expired. Force exiting.");
-              ThreadUtil.logThreadDump("Thread dump at heartbeat monitor shutdown timeout.");
-              System.exit(1);
-            }, SHUTDOWN_TIMOUT_MS, TimeUnit.MILLISECONDS);
-          onContainerExpired.run();
-        }
-      }, 0, SCHEDULE_MS, TimeUnit.MILLISECONDS);
+      ContainerHeartbeatResponse response = containerHeartbeatClient.requestHeartbeat();
+      if (!response.isAlive()) {
+        scheduler.schedule(() -> {
+          // On timeout of container shutting down, force exit.
+          LOG.error("Graceful shutdown timeout expired. Force exiting.");
+          ThreadUtil.logThreadDump("Thread dump at heartbeat monitor shutdown timeout.");
+          System.exit(1);
+        }, SHUTDOWN_TIMOUT_MS, TimeUnit.MILLISECONDS);
+        onContainerExpired.run();
+      }
+    }, 0, SCHEDULE_MS, TimeUnit.MILLISECONDS);
     started = true;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java b/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java
index 864b558..34baad0 100644
--- a/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java
+++ b/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java
@@ -61,13 +61,13 @@ public class LocalityManager {
   public Map<String, Map<String, String>> readContainerLocality() {
     Map<String, Map<String, String>> allMappings = new HashMap<>();
     metadataStore.all().forEach((containerId, valueBytes) -> {
-        if (valueBytes != null) {
-          String locationId = valueSerde.fromBytes(valueBytes);
-          Map<String, String> values = new HashMap<>();
-          values.put(SetContainerHostMapping.HOST_KEY, locationId);
-          allMappings.put(containerId, values);
-        }
-      });
+      if (valueBytes != null) {
+        String locationId = valueSerde.fromBytes(valueBytes);
+        Map<String, String> values = new HashMap<>();
+        values.put(SetContainerHostMapping.HOST_KEY, locationId);
+        allMappings.put(containerId, values);
+      }
+    });
     if (LOG.isDebugEnabled()) {
       for (Map.Entry<String, Map<String, String>> entry : allMappings.entrySet()) {
         LOG.debug(String.format("Locality for container %s: %s", entry.getKey(), entry.getValue()));
diff --git a/samza-core/src/main/java/org/apache/samza/container/RunLoop.java b/samza-core/src/main/java/org/apache/samza/container/RunLoop.java
index 2917c83..f0968fc 100644
--- a/samza-core/src/main/java/org/apache/samza/container/RunLoop.java
+++ b/samza-core/src/main/java/org/apache/samza/container/RunLoop.java
@@ -397,8 +397,8 @@ public class RunLoop implements Runnable, Throttleable {
       final EpochTimeScheduler epochTimeScheduler = task.epochTimeScheduler();
       if (epochTimeScheduler != null) {
         epochTimeScheduler.registerListener(() -> {
-            state.needScheduler();
-          });
+          state.needScheduler();
+        });
       }
     }
 
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java
index dc9b2f4..a8aaecc 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java
@@ -61,10 +61,10 @@ class AllSspToSingleTaskGrouper implements SystemStreamPartitionGrouper {
     }
 
     processorList.forEach(processor -> {
-        // Create a task name for each processor and assign all partitions to each task name.
-        final TaskName taskName = new TaskName(String.format("Task-%s", processor));
-        groupedMap.put(taskName, ssps);
-      });
+      // Create a task name for each processor and assign all partitions to each task name.
+      final TaskName taskName = new TaskName(String.format("Task-%s", processor));
+      groupedMap.put(taskName, ssps);
+    });
 
     return groupedMap;
   }
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java
index 6507046..808f488 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java
@@ -166,11 +166,11 @@ public class SSPGrouperProxy {
   private Map<SystemStream, Integer> getSystemStreamToPartitionCount(Map<TaskName, List<SystemStreamPartition>> taskToSSPAssignment) {
     Map<SystemStream, Integer> systemStreamToPartitionCount = new HashMap<>();
     taskToSSPAssignment.forEach((taskName, systemStreamPartitions) -> {
-        systemStreamPartitions.forEach(systemStreamPartition -> {
-            SystemStream systemStream = systemStreamPartition.getSystemStream();
-            systemStreamToPartitionCount.put(systemStream, systemStreamToPartitionCount.getOrDefault(systemStream, 0) + 1);
-          });
+      systemStreamPartitions.forEach(systemStreamPartition -> {
+        SystemStream systemStream = systemStreamPartition.getSystemStream();
+        systemStreamToPartitionCount.put(systemStream, systemStreamToPartitionCount.getOrDefault(systemStream, 0) + 1);
       });
+    });
 
     return systemStreamToPartitionCount;
   }
@@ -185,12 +185,12 @@ public class SSPGrouperProxy {
     Map<SystemStreamPartition, TaskName> sspToTaskMapping = new HashMap<>();
     Map<TaskName, List<SystemStreamPartition>> previousTaskToSSPAssignment = grouperMetadata.getPreviousTaskToSSPAssignment();
     previousTaskToSSPAssignment.forEach((taskName, systemStreamPartitions) -> {
-        systemStreamPartitions.forEach(systemStreamPartition -> {
-            if (!broadcastSystemStreamPartitions.contains(systemStreamPartition)) {
-              sspToTaskMapping.put(systemStreamPartition, taskName);
-            }
-          });
+      systemStreamPartitions.forEach(systemStreamPartition -> {
+        if (!broadcastSystemStreamPartitions.contains(systemStreamPartition)) {
+          sspToTaskMapping.put(systemStreamPartition, taskName);
+        }
       });
+    });
     return sspToTaskMapping;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java b/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java
index 7c11da4..aec7215 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java
@@ -172,11 +172,11 @@ public class GroupByContainerIds implements TaskNameGrouper {
 
     // Generate the {@see LocationId} to processors mapping and processorId to {@see TaskGroup} mapping.
     processorLocality.forEach((processorId, locationId) -> {
-        List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
-        processorIds.add(processorId);
-        locationIdToProcessors.put(locationId, processorIds);
-        processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
-      });
+      List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
+      processorIds.add(processorId);
+      locationIdToProcessors.put(locationId, processorIds);
+      processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
+    });
 
     int numTasksPerProcessor = taskModels.size() / processorLocality.size();
     Set<TaskName> assignedTasks = new HashSet<>();
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java b/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java
index e9fcadb..0a22c2d 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java
@@ -74,24 +74,24 @@ public class TaskAssignmentManager {
   public Map<String, String> readTaskAssignment() {
     taskNameToContainerId.clear();
     taskContainerMappingMetadataStore.all().forEach((taskName, valueBytes) -> {
-        String containerId = containerIdSerde.fromBytes(valueBytes);
-        if (containerId != null) {
-          taskNameToContainerId.put(taskName, containerId);
-        }
-        LOG.debug("Assignment for task {}: {}", taskName, containerId);
-      });
+      String containerId = containerIdSerde.fromBytes(valueBytes);
+      if (containerId != null) {
+        taskNameToContainerId.put(taskName, containerId);
+      }
+      LOG.debug("Assignment for task {}: {}", taskName, containerId);
+    });
     return Collections.unmodifiableMap(new HashMap<>(taskNameToContainerId));
   }
 
   public Map<TaskName, TaskMode> readTaskModes() {
     Map<TaskName, TaskMode> taskModeMap = new HashMap<>();
     taskModeMappingMetadataStore.all().forEach((taskName, valueBytes) -> {
-        String taskMode = taskModeSerde.fromBytes(valueBytes);
-        if (taskMode != null) {
-          taskModeMap.put(new TaskName(taskName), TaskMode.valueOf(taskMode));
-        }
-        LOG.debug("Task mode assignment for task {}: {}", taskName, taskMode);
-      });
+      String taskMode = taskModeSerde.fromBytes(valueBytes);
+      if (taskMode != null) {
+        taskModeMap.put(new TaskName(taskName), TaskMode.valueOf(taskMode));
+      }
+      LOG.debug("Task mode assignment for task {}: {}", taskName, taskMode);
+    });
     return Collections.unmodifiableMap(new HashMap<>(taskModeMap));
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java b/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java
index f4cd527..f5b99d7 100644
--- a/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java
+++ b/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java
@@ -106,15 +106,15 @@ public class NamespaceAwareCoordinatorStreamStore implements MetadataStore {
     Map<String, byte[]> bootstrappedMessages = new HashMap<>();
     Map<String, byte[]> coordinatorStreamMessages = metadataStore.all();
     coordinatorStreamMessages.forEach((coordinatorMessageKeyAsJson, value) -> {
-        CoordinatorMessageKey coordinatorMessageKey = CoordinatorStreamStore.deserializeCoordinatorMessageKeyFromJson(coordinatorMessageKeyAsJson);
-        if (Objects.equals(namespace, coordinatorMessageKey.getNamespace())) {
-          if (value != null) {
-            bootstrappedMessages.put(coordinatorMessageKey.getKey(), value);
-          } else {
-            bootstrappedMessages.remove(coordinatorMessageKey.getKey());
-          }
+      CoordinatorMessageKey coordinatorMessageKey = CoordinatorStreamStore.deserializeCoordinatorMessageKeyFromJson(coordinatorMessageKeyAsJson);
+      if (Objects.equals(namespace, coordinatorMessageKey.getNamespace())) {
+        if (value != null) {
+          bootstrappedMessages.put(coordinatorMessageKey.getKey(), value);
+        } else {
+          bootstrappedMessages.remove(coordinatorMessageKey.getKey());
         }
-      });
+      }
+    });
 
     return bootstrappedMessages;
   }
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java b/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java
index 0da1fd5..72b3cee 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java
@@ -285,15 +285,15 @@ import org.slf4j.LoggerFactory;
    */
   private void validateInputStreams() {
     inputStreams.forEach(edge -> {
-        if (!edge.getSourceNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Source stream %s should not have producers.", edge.getName()));
-        }
-        if (edge.getTargetNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Source stream %s should have consumers.", edge.getName()));
-        }
-      });
+      if (!edge.getSourceNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Source stream %s should not have producers.", edge.getName()));
+      }
+      if (edge.getTargetNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Source stream %s should have consumers.", edge.getName()));
+      }
+    });
   }
 
   /**
@@ -301,15 +301,15 @@ import org.slf4j.LoggerFactory;
    */
   private void validateOutputStreams() {
     outputStreams.forEach(edge -> {
-        if (!edge.getTargetNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Sink stream %s should not have consumers", edge.getName()));
-        }
-        if (edge.getSourceNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Sink stream %s should have producers", edge.getName()));
-        }
-      });
+      if (!edge.getTargetNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Sink stream %s should not have consumers", edge.getName()));
+      }
+      if (edge.getSourceNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Sink stream %s should have producers", edge.getName()));
+      }
+    });
   }
 
   /**
@@ -322,11 +322,11 @@ import org.slf4j.LoggerFactory;
     internalEdges.removeAll(outputStreams);
 
     internalEdges.forEach(edge -> {
-        if (edge.getSourceNodes().isEmpty() || edge.getTargetNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Internal stream %s should have both producers and consumers", edge.getName()));
-        }
-      });
+      if (edge.getSourceNodes().isEmpty() || edge.getTargetNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Internal stream %s should have both producers and consumers", edge.getName()));
+      }
+    });
   }
 
   /**
@@ -352,19 +352,19 @@ import org.slf4j.LoggerFactory;
     Set<JobNode> visited = new HashSet<>();
 
     inputStreams.forEach(input -> {
-        List<JobNode> next = input.getTargetNodes();
-        queue.addAll(next);
-        visited.addAll(next);
-      });
+      List<JobNode> next = input.getTargetNodes();
+      queue.addAll(next);
+      visited.addAll(next);
+    });
 
     while (!queue.isEmpty()) {
       JobNode node = queue.poll();
       node.getOutEdges().values().stream().flatMap(edge -> edge.getTargetNodes().stream()).forEach(target -> {
-          if (!visited.contains(target)) {
-            visited.add(target);
-            queue.offer(target);
-          }
-        });
+        if (!visited.contains(target)) {
+          visited.add(target);
+          queue.offer(target);
+        }
+      });
     }
 
     return visited;
@@ -385,17 +385,17 @@ import org.slf4j.LoggerFactory;
     Map<String, Long> indegree = new HashMap<>();
     Set<JobNode> visited = new HashSet<>();
     pnodes.forEach(node -> {
-        String nid = node.getJobNameAndId();
-        //only count the degrees of intermediate streams
-        long degree = node.getInEdges().values().stream().filter(e -> !inputStreams.contains(e)).count();
-        indegree.put(nid, degree);
-
-        if (degree == 0L) {
-          // start from the nodes that has no intermediate input streams, so it only consumes from input streams
-          q.add(node);
-          visited.add(node);
-        }
-      });
+      String nid = node.getJobNameAndId();
+      //only count the degrees of intermediate streams
+      long degree = node.getInEdges().values().stream().filter(e -> !inputStreams.contains(e)).count();
+      indegree.put(nid, degree);
+
+      if (degree == 0L) {
+        // start from the nodes that has no intermediate input streams, so it only consumes from input streams
+        q.add(node);
+        visited.add(node);
+      }
+    });
 
     List<JobNode> sortedNodes = new ArrayList<>();
     Set<JobNode> reachable = new HashSet<>();
@@ -413,15 +413,15 @@ import org.slf4j.LoggerFactory;
         JobNode node = q.poll();
         sortedNodes.add(node);
         node.getOutEdges().values().stream().flatMap(edge -> edge.getTargetNodes().stream()).forEach(n -> {
-            String nid = n.getJobNameAndId();
-            Long degree = indegree.get(nid) - 1;
-            indegree.put(nid, degree);
-            if (degree == 0L && !visited.contains(n)) {
-              q.add(n);
-              visited.add(n);
-            }
-            reachable.add(n);
-          });
+          String nid = n.getJobNameAndId();
+          Long degree = indegree.get(nid) - 1;
+          indegree.put(nid, degree);
+          if (degree == 0L && !visited.contains(n)) {
+            q.add(n);
+            visited.add(n);
+          }
+          reachable.add(n);
+        });
       }
 
       if (sortedNodes.size() < pnodes.size()) {
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java b/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java
index a6717fd..e42b530 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java
@@ -225,19 +225,19 @@ import org.codehaus.jackson.map.ObjectMapper;
     OperatorGraphJson opGraph = new OperatorGraphJson();
     opGraph.inputStreams = new ArrayList<>();
     jobNode.getInEdges().values().forEach(inStream -> {
-        StreamJson inputJson = new StreamJson();
-        opGraph.inputStreams.add(inputJson);
-        inputJson.streamId = inStream.getStreamSpec().getId();
-        inputJson.nextOperatorIds = jobNode.getNextOperatorIds(inputJson.streamId);
-        updateOperatorGraphJson(jobNode.getInputOperator(inputJson.streamId), opGraph);
-      });
+      StreamJson inputJson = new StreamJson();
+      opGraph.inputStreams.add(inputJson);
+      inputJson.streamId = inStream.getStreamSpec().getId();
+      inputJson.nextOperatorIds = jobNode.getNextOperatorIds(inputJson.streamId);
+      updateOperatorGraphJson(jobNode.getInputOperator(inputJson.streamId), opGraph);
+    });
 
     opGraph.outputStreams = new ArrayList<>();
     jobNode.getOutEdges().values().forEach(outStream -> {
-        StreamJson outputJson = new StreamJson();
-        outputJson.streamId = outStream.getStreamSpec().getId();
-        opGraph.outputStreams.add(outputJson);
-      });
+      StreamJson outputJson = new StreamJson();
+      outputJson.streamId = outStream.getStreamSpec().getId();
+      opGraph.outputStreams.add(outputJson);
+    });
     return opGraph;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobNode.java b/samza-core/src/main/java/org/apache/samza/execution/JobNode.java
index e4fbdba..28bdff1 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobNode.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobNode.java
@@ -172,11 +172,11 @@ public class JobNode {
   private void findReachableOperators(Collection<OperatorSpec> inputOperatorsInJobNode,
       Set<OperatorSpec> reachableOperators) {
     inputOperatorsInJobNode.forEach(op -> {
-        if (reachableOperators.contains(op)) {
-          return;
-        }
-        reachableOperators.add(op);
-        findReachableOperators(op.getRegisteredOperatorSpecs(), reachableOperators);
-      });
+      if (reachableOperators.contains(op)) {
+        return;
+      }
+      reachableOperators.add(op);
+      findReachableOperators(op.getRegisteredOperatorSpecs(), reachableOperators);
+    });
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java b/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java
index 4ae4886..d87530d 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java
@@ -74,11 +74,11 @@ import org.slf4j.LoggerFactory;
     Map<String, String> mergedConfig = new HashMap<>(generatedConfig);
 
     originalConfig.forEach((k, v) -> {
-        if (generatedConfig.containsKey(k) && !Objects.equals(generatedConfig.get(k), v)) {
-          LOG.info("Replacing generated config for key: {} value: {} with original config value: {}", k, generatedConfig.get(k), v);
-        }
-        mergedConfig.put(k, v);
-      });
+      if (generatedConfig.containsKey(k) && !Objects.equals(generatedConfig.get(k), v)) {
+        LOG.info("Replacing generated config for key: {} value: {} with original config value: {}", k, generatedConfig.get(k), v);
+      }
+      mergedConfig.put(k, v);
+    });
 
     return ConfigUtil.rewriteConfig(new MapConfig(mergedConfig));
   }
@@ -243,20 +243,20 @@ import org.slf4j.LoggerFactory;
 
     // Add side inputs to the inputs and mark the stream as bootstrap
     tables.values().forEach(tableDescriptor -> {
-        if (tableDescriptor instanceof LocalTableDescriptor) {
-          LocalTableDescriptor localTableDescriptor = (LocalTableDescriptor) tableDescriptor;
-          List<String> sideInputs = localTableDescriptor.getSideInputs();
-          if (sideInputs != null && !sideInputs.isEmpty()) {
-            sideInputs.stream()
-                .map(sideInput -> StreamUtil.getSystemStreamFromNameOrId(originalConfig, sideInput))
-                .forEach(systemStream -> {
-                    inputs.add(StreamUtil.getNameFromSystemStream(systemStream));
-                    generatedConfig.put(String.format(StreamConfig.STREAM_PREFIX + StreamConfig.BOOTSTRAP,
-                        systemStream.getSystem(), systemStream.getStream()), "true");
-                  });
-          }
+      if (tableDescriptor instanceof LocalTableDescriptor) {
+        LocalTableDescriptor localTableDescriptor = (LocalTableDescriptor) tableDescriptor;
+        List<String> sideInputs = localTableDescriptor.getSideInputs();
+        if (sideInputs != null && !sideInputs.isEmpty()) {
+          sideInputs.stream()
+              .map(sideInput -> StreamUtil.getSystemStreamFromNameOrId(originalConfig, sideInput))
+              .forEach(systemStream -> {
+                inputs.add(StreamUtil.getNameFromSystemStream(systemStream));
+                generatedConfig.put(String.format(StreamConfig.STREAM_PREFIX + StreamConfig.BOOTSTRAP,
+                    systemStream.getSystem(), systemStream.getStream()), "true");
+              });
         }
-      });
+      }
+    });
   }
 
   /**
@@ -285,15 +285,15 @@ import org.slf4j.LoggerFactory;
     Map<String, Serde> storeKeySerdes = new HashMap<>();
     Map<String, Serde> storeMsgSerdes = new HashMap<>();
     stores.forEach(storeDescriptor -> {
-        storeKeySerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getKeySerde());
-        storeMsgSerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getMsgSerde());
-      });
+      storeKeySerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getKeySerde());
+      storeMsgSerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getMsgSerde());
+    });
 
     Map<String, Serde> tableKeySerdes = new HashMap<>();
     Map<String, Serde> tableMsgSerdes = new HashMap<>();
     tables.forEach(tableId -> {
-        addSerdes(jobNode.getTableSerdes(tableId), tableId, tableKeySerdes, tableMsgSerdes);
-      });
+      addSerdes(jobNode.getTableSerdes(tableId), tableId, tableKeySerdes, tableMsgSerdes);
+    });
 
     // for each unique stream or store serde instance, generate a unique name and serialize to config
     HashSet<Serde> serdes = new HashSet<>(streamKeySerdes.values());
@@ -306,46 +306,46 @@ import org.slf4j.LoggerFactory;
     Base64.Encoder base64Encoder = Base64.getEncoder();
     Map<Serde, String> serdeUUIDs = new HashMap<>();
     serdes.forEach(serde -> {
-        String serdeName = serdeUUIDs.computeIfAbsent(serde,
-            s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
-        configs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
-            base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
-      });
+      String serdeName = serdeUUIDs.computeIfAbsent(serde,
+        s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
+      configs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
+          base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
+    });
 
     // set key and msg serdes for streams to the serde names generated above
     streamKeySerdes.forEach((streamId, serde) -> {
-        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
-        String keySerdeConfigKey = streamIdPrefix + StreamConfig.KEY_SERDE;
-        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
+      String keySerdeConfigKey = streamIdPrefix + StreamConfig.KEY_SERDE;
+      configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     streamMsgSerdes.forEach((streamId, serde) -> {
-        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
-        String valueSerdeConfigKey = streamIdPrefix + StreamConfig.MSG_SERDE;
-        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
+      String valueSerdeConfigKey = streamIdPrefix + StreamConfig.MSG_SERDE;
+      configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     // set key and msg serdes for stores to the serde names generated above
     storeKeySerdes.forEach((storeName, serde) -> {
-        String keySerdeConfigKey = String.format(StorageConfig.KEY_SERDE, storeName);
-        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String keySerdeConfigKey = String.format(StorageConfig.KEY_SERDE, storeName);
+      configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     storeMsgSerdes.forEach((storeName, serde) -> {
-        String msgSerdeConfigKey = String.format(StorageConfig.MSG_SERDE, storeName);
-        configs.put(msgSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String msgSerdeConfigKey = String.format(StorageConfig.MSG_SERDE, storeName);
+      configs.put(msgSerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     // set key and msg serdes for stores to the serde names generated above
     tableKeySerdes.forEach((tableId, serde) -> {
-        String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
-        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
+      configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     tableMsgSerdes.forEach((tableId, serde) -> {
-        String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
-        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
+      configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
+    });
   }
 
   private void addSerdes(KV<Serde, Serde> serdes, String streamId, Map<String, Serde> keySerdeMap,
diff --git a/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java b/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java
index 4999d06..63690b7 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java
@@ -136,8 +136,8 @@ public class StreamEdge {
       streamConfig.put(String.format(StreamConfig.PRIORITY_FOR_STREAM_ID, streamId), String.valueOf(Integer.MAX_VALUE));
     }
     spec.getConfig().forEach((property, value) -> {
-        streamConfig.put(String.format(StreamConfig.STREAM_ID_PREFIX, streamId) + property, value);
-      });
+      streamConfig.put(String.format(StreamConfig.STREAM_ID_PREFIX, streamId) + property, value);
+    });
 
     return new MapConfig(streamConfig);
   }
diff --git a/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java b/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java
index 26bc348..f75a8dc 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java
@@ -117,9 +117,9 @@ public class StreamManager {
           .map(id -> new StreamSpec(id, streamConfig.getPhysicalName(id), streamConfig.getSystem(id)))
           .collect(Collectors.toSet());
       intStreams.forEach(stream -> {
-          LOGGER.info("Clear intermediate stream {} in system {}", stream.getPhysicalName(), stream.getSystemName());
-          systemAdmins.getSystemAdmin(stream.getSystemName()).clearStream(stream);
-        });
+        LOGGER.info("Clear intermediate stream {} in system {}", stream.getPhysicalName(), stream.getSystemName());
+        systemAdmins.getSystemAdmin(stream.getSystemName()).clearStream(stream);
+      });
 
       //Find checkpoint stream and clean up
       TaskConfig taskConfig = new TaskConfig(prevConfig);
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java b/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java
index d4782b0..779644d 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java
@@ -75,11 +75,11 @@ class ControlMessageSender {
 
   private int getPartitionCount(SystemStream systemStream) {
     return PARTITION_COUNT_CACHE.computeIfAbsent(systemStream, ss -> {
-        SystemStreamMetadata metadata = metadataCache.getSystemStreamMetadata(ss, true);
-        if (metadata == null) {
-          throw new SamzaException("Unable to find metadata for stream " + systemStream);
-        }
-        return metadata.getSystemStreamPartitionMetadata().size();
-      });
+      SystemStreamMetadata metadata = metadataCache.getSystemStreamMetadata(ss, true);
+      if (metadata == null) {
+        throw new SamzaException("Unable to find metadata for stream " + systemStream);
+      }
+      return metadata.getSystemStreamPartitionMetadata().size();
+    });
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java b/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java
index 8c9db61..7d9c597 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java
@@ -75,8 +75,8 @@ class EndOfStreamStates {
   EndOfStreamStates(Set<SystemStreamPartition> ssps, Map<SystemStream, Integer> producerTaskCounts) {
     Map<SystemStreamPartition, EndOfStreamState> states = new HashMap<>();
     ssps.forEach(ssp -> {
-        states.put(ssp, new EndOfStreamState(producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0)));
-      });
+      states.put(ssp, new EndOfStreamState(producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0)));
+    });
     this.eosStates = Collections.unmodifiableMap(states);
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java
index 528acc6..9fd35eb 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java
@@ -182,14 +182,14 @@ public abstract class OperatorImpl<M, RM> {
     }
 
     CompletionStage<Void> result = completableResultsFuture.thenCompose(results -> {
-        long endNs = this.highResClock.nanoTime();
-        this.handleMessageNs.update(endNs - startNs);
+      long endNs = this.highResClock.nanoTime();
+      this.handleMessageNs.update(endNs - startNs);
 
-        return CompletableFuture.allOf(results.stream()
-            .flatMap(r -> this.registeredOperators.stream()
-              .map(op -> op.onMessageAsync(r, collector, coordinator)))
-            .toArray(CompletableFuture[]::new));
-      });
+      return CompletableFuture.allOf(results.stream()
+          .flatMap(r -> this.registeredOperators.stream()
+            .map(op -> op.onMessageAsync(r, collector, coordinator)))
+          .toArray(CompletableFuture[]::new));
+    });
 
     WatermarkFunction watermarkFn = getOperatorSpec().getWatermarkFn();
     if (watermarkFn != null) {
@@ -281,13 +281,13 @@ public abstract class OperatorImpl<M, RM> {
       // populate the end-of-stream through the dag
       endOfStreamFuture = onEndOfStream(collector, coordinator)
           .thenAccept(result -> {
-              if (eosStates.allEndOfStream()) {
-                // all inputs have been end-of-stream, shut down the task
-                LOG.info("All input streams have reached the end for task {}", taskName.getTaskName());
-                coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
-                coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
-              }
-            });
+            if (eosStates.allEndOfStream()) {
+              // all inputs have been end-of-stream, shut down the task
+              LOG.info("All input streams have reached the end for task {}", taskName.getTaskName());
+              coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+              coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+            }
+          });
     }
 
     return endOfStreamFuture;
@@ -485,24 +485,24 @@ public abstract class OperatorImpl<M, RM> {
       @Override
       public void schedule(K key, long time) {
         callbackScheduler.scheduleCallback(key, time, (k, collector, coordinator) -> {
-            final ScheduledFunction<K, RM> scheduledFn = getOperatorSpec().getScheduledFn();
-            if (scheduledFn != null) {
-              final Collection<RM> output = scheduledFn.onCallback(key, time);
-
-              if (!output.isEmpty()) {
-                CompletableFuture<Void> timerFuture = CompletableFuture.allOf(output.stream()
-                    .flatMap(r -> registeredOperators.stream()
-                        .map(op -> op.onMessageAsync(r, collector, coordinator)))
-                    .toArray(CompletableFuture[]::new));
-
-                timerFuture.join();
-              }
-            } else {
-              throw new SamzaException(
-                  String.format("Operator %s id %s (created at %s) must implement ScheduledFunction to use system timer.",
-                      getOperatorSpec().getOpCode().name(), getOpImplId(), getOperatorSpec().getSourceLocation()));
+          final ScheduledFunction<K, RM> scheduledFn = getOperatorSpec().getScheduledFn();
+          if (scheduledFn != null) {
+            final Collection<RM> output = scheduledFn.onCallback(key, time);
+
+            if (!output.isEmpty()) {
+              CompletableFuture<Void> timerFuture = CompletableFuture.allOf(output.stream()
+                  .flatMap(r -> registeredOperators.stream()
+                      .map(op -> op.onMessageAsync(r, collector, coordinator)))
+                  .toArray(CompletableFuture[]::new));
+
+              timerFuture.join();
             }
-          });
+          } else {
+            throw new SamzaException(
+                String.format("Operator %s id %s (created at %s) must implement ScheduledFunction to use system timer.",
+                    getOperatorSpec().getOpCode().name(), getOpImplId(), getOperatorSpec().getSourceLocation()));
+          }
+        });
       }
 
       @Override
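
The hunks above re-indent OperatorImpl's async fan-out: once an operator's own results complete, every result is dispatched to every registered downstream operator, and the per-result futures are joined with CompletableFuture.allOf so the caller completes only after the whole sub-DAG has seen the message. A runnable sketch of just that combinator, with Handler as a hypothetical stand-in for the downstream operator interface:

    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    public class AsyncFanOut {
      interface Handler { CompletableFuture<Void> onMessageAsync(String message); }

      // Cross product of results x downstream handlers, joined into one future.
      static CompletableFuture<Void> propagate(List<String> results, List<Handler> downstream) {
        return CompletableFuture.allOf(results.stream()
            .flatMap(r -> downstream.stream().map(h -> h.onMessageAsync(r)))
            .toArray(CompletableFuture[]::new));
      }

      public static void main(String[] args) {
        Handler printer = m -> CompletableFuture.runAsync(() -> System.out.println("got " + m));
        propagate(List.of("a", "b"), List.of(printer, printer)).join();
      }
    }
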
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java
index 4cf7201..705f0cb 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java
@@ -108,8 +108,8 @@ public class OperatorImplGraph {
                 getIntermediateToInputStreamsMap(specGraph, streamConfig))
             : Collections.EMPTY_MAP;
     producerTaskCounts.forEach((stream, count) -> {
-        LOG.info("{} has {} producer tasks.", stream, count);
-      });
+      LOG.info("{} has {} producer tasks.", stream, count);
+    });
 
     // set states for end-of-stream
     internalTaskContext.registerObject(EndOfStreamStates.class.getName(),
@@ -124,11 +124,11 @@ public class OperatorImplGraph {
                 context.getContainerContext().getContainerMetricsRegistry()));
 
     specGraph.getInputOperators().forEach((streamId, inputOpSpec) -> {
-        SystemStream systemStream = streamConfig.streamIdToSystemStream(streamId);
-        InputOperatorImpl inputOperatorImpl =
-            (InputOperatorImpl) createAndRegisterOperatorImpl(null, inputOpSpec, systemStream, context);
-        this.inputOperators.put(systemStream, inputOperatorImpl);
-      });
+      SystemStream systemStream = streamConfig.streamIdToSystemStream(streamId);
+      InputOperatorImpl inputOperatorImpl =
+          (InputOperatorImpl) createAndRegisterOperatorImpl(null, inputOpSpec, systemStream, context);
+      this.inputOperators.put(systemStream, inputOperatorImpl);
+    });
   }
 
   /**
@@ -187,10 +187,10 @@ public class OperatorImplGraph {
 
       Collection<OperatorSpec> registeredSpecs = operatorSpec.getRegisteredOperatorSpecs();
       registeredSpecs.forEach(registeredSpec -> {
-          LOG.debug("Creating operator {} with opCode: {}", registeredSpec.getOpId(), registeredSpec.getOpCode());
-          OperatorImpl nextImpl = createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context);
-          operatorImpl.registerNextOperator(nextImpl);
-        });
+        LOG.debug("Creating operator {} with opCode: {}", registeredSpec.getOpId(), registeredSpec.getOpCode());
+        OperatorImpl nextImpl = createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context);
+        operatorImpl.registerNextOperator(nextImpl);
+      });
       return operatorImpl;
     } else {
       // the implementation corresponding to operatorSpec has already been instantiated and registered.
@@ -200,7 +200,7 @@ public class OperatorImplGraph {
       // We still need to traverse the DAG further to register the input streams.
       Collection<OperatorSpec> registeredSpecs = operatorSpec.getRegisteredOperatorSpecs();
       registeredSpecs.forEach(
-          registeredSpec -> createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context));
+        registeredSpec -> createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context));
       return operatorImpl;
     }
   }
@@ -255,7 +255,7 @@ public class OperatorImplGraph {
       Clock clock) {
     // get the per task pair of PartialJoinOperatorImpl for the corresponding {@code joinOpSpec}
     KV<PartialJoinOperatorImpl, PartialJoinOperatorImpl> partialJoinOpImpls = joinOpImpls.computeIfAbsent(joinOpSpec.getOpId(),
-        joinOpId -> {
+      joinOpId -> {
         PartialJoinFunction leftJoinFn = createLeftJoinFn(joinOpSpec);
         PartialJoinFunction rightJoinFn = createRightJoinFn(joinOpSpec);
         return new KV(new PartialJoinOperatorImpl(joinOpSpec, true, leftJoinFn, rightJoinFn, clock),
@@ -365,12 +365,12 @@ public class OperatorImplGraph {
   static Multimap<SystemStream, String> getStreamToConsumerTasks(JobModel jobModel) {
     Multimap<SystemStream, String> streamToConsumerTasks = HashMultimap.create();
     jobModel.getContainers().values().forEach(containerModel -> {
-        containerModel.getTasks().values().forEach(taskModel -> {
-            taskModel.getSystemStreamPartitions().forEach(ssp -> {
-                streamToConsumerTasks.put(ssp.getSystemStream(), taskModel.getTaskName().getTaskName());
-              });
-          });
+      containerModel.getTasks().values().forEach(taskModel -> {
+        taskModel.getSystemStreamPartitions().forEach(ssp -> {
+          streamToConsumerTasks.put(ssp.getSystemStream(), taskModel.getTaskName().getTaskName());
+        });
       });
+    });
     return streamToConsumerTasks;
   }
 
@@ -384,9 +384,9 @@ public class OperatorImplGraph {
     Multimap<SystemStream, SystemStream> outputToInputStreams = HashMultimap.create();
     specGraph.getInputOperators().entrySet().stream()
         .forEach(entry -> {
-            SystemStream systemStream = streamConfig.streamIdToSystemStream(entry.getKey());
-            computeOutputToInput(systemStream, entry.getValue(), outputToInputStreams, streamConfig);
-          });
+          SystemStream systemStream = streamConfig.streamIdToSystemStream(entry.getKey());
+          computeOutputToInput(systemStream, entry.getValue(), outputToInputStreams, streamConfig);
+        });
     return outputToInputStreams;
   }
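
getStreamToConsumerTasks above walks containers, then tasks, then partitions to build a Guava Multimap from each stream to the tasks consuming it. The same inversion over simplified types (plain strings instead of JobModel and SystemStream; requires Guava on the classpath):

    import com.google.common.collect.HashMultimap;
    import com.google.common.collect.Multimap;
    import java.util.List;
    import java.util.Map;

    public class StreamToTasks {
      // A Multimap accepts several tasks per stream without hand-managed Set values.
      static Multimap<String, String> invert(Map<String, List<String>> taskToStreams) {
        Multimap<String, String> streamToTasks = HashMultimap.create();
        taskToStreams.forEach((task, streams) ->
            streams.forEach(stream -> streamToTasks.put(stream, task)));
        return streamToTasks;
      }

      public static void main(String[] args) {
        System.out.println(invert(Map.of(
            "task-0", List.of("pageviews", "clicks"),
            "task-1", List.of("pageviews"))));
      }
    }
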
 
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java
index 657ba2a..2104c4e 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java
@@ -36,8 +36,8 @@ class WatermarkMetrics extends MetricsBase {
 
   void setAggregateTime(SystemStreamPartition systemStreamPartition, long time) {
     final Gauge<Long> aggregate = aggregates.computeIfAbsent(systemStreamPartition,
-        ssp -> newGauge(String.format("%s-%s-aggr-watermark",
-        ssp.getStream(), ssp.getPartition().getPartitionId()), 0L));
+      ssp -> newGauge(String.format("%s-%s-aggr-watermark",
+          ssp.getStream(), ssp.getPartition().getPartitionId()), 0L));
     aggregate.set(time);
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java
index b363b2c..84e0687 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java
@@ -94,12 +94,12 @@ class WatermarkStates {
     final List<SystemStreamPartition> intSsps = new ArrayList<>();
 
     ssps.forEach(ssp -> {
-        final int producerCount = producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0);
-        states.put(ssp, new WatermarkState(producerCount));
-        if (producerCount != 0) {
-          intSsps.add(ssp);
-        }
-      });
+      final int producerCount = producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0);
+      states.put(ssp, new WatermarkState(producerCount));
+      if (producerCount != 0) {
+        intSsps.add(ssp);
+      }
+    });
     this.watermarkStates = Collections.unmodifiableMap(states);
     this.watermarkMetrics = new WatermarkMetrics(metricsRegistry);
     this.intermediateSsps = Collections.unmodifiableList(intSsps);
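
EndOfStreamStates and WatermarkStates both follow the same constructor-then-freeze idiom: a local mutable map is filled from the constructor arguments and only an unmodifiable view is kept, so per-partition state can later be read without risk of mutation. A compact sketch with simplified types:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    public class FrozenStates {
      private final Map<String, Integer> states;  // unmodifiable after construction

      FrozenStates(Set<String> partitions, Map<String, Integer> producerCounts) {
        Map<String, Integer> tmp = new HashMap<>();
        partitions.forEach(p -> tmp.put(p, producerCounts.getOrDefault(p, 0)));
        this.states = Collections.unmodifiableMap(tmp);
      }

      int producersFor(String partition) {
        return states.getOrDefault(partition, 0);
      }
    }
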
diff --git a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java
index 32bf988..b3df99d 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java
@@ -51,9 +51,9 @@ public class TimeSinceFirstMessageTriggerImpl<M, WK> implements TriggerImpl<M, W
       long triggerDurationMs = trigger.getDuration().toMillis();
       Long callbackTime = now + triggerDurationMs;
       cancellable =  context.scheduleCallback(() -> {
-          LOG.trace("Time since first message trigger fired");
-          shouldFire = true;
-        }, callbackTime, triggerKey);
+        LOG.trace("Time since first message trigger fired");
+        shouldFire = true;
+      }, callbackTime, triggerKey);
     }
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java
index 2454ce9..e8a4fe2 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java
@@ -51,9 +51,9 @@ public class TimeTriggerImpl<M, WK> implements TriggerImpl<M, WK> {
 
     if (cancellable == null) {
       cancellable = context.scheduleCallback(() -> {
-          LOG.trace("Time trigger fired");
-          shouldFire = true;
-        }, callbackTime, triggerKey);
+        LOG.trace("Time trigger fired");
+        shouldFire = true;
+      }, callbackTime, triggerKey);
     }
   }
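
TimeSinceFirstMessageTriggerImpl and TimeTriggerImpl share a schedule-once guard: a callback is registered only while the cancellable handle is null, and firing simply flips a flag the caller polls. A sketch using a plain ScheduledExecutorService in place of Samza's TriggerContext and Cancellable:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class OneShotTrigger {
      private final ScheduledExecutorService scheduler =
          Executors.newSingleThreadScheduledExecutor();
      private ScheduledFuture<?> cancellable;
      private volatile boolean shouldFire = false;

      synchronized void onMessage(long delayMs) {
        if (cancellable == null) {  // at most one pending callback
          cancellable = scheduler.schedule(() -> shouldFire = true, delayMs, TimeUnit.MILLISECONDS);
        }
      }

      synchronized void cancel() {
        if (cancellable != null) {
          cancellable.cancel(false);
          cancellable = null;
        }
      }

      boolean shouldFire() {
        return shouldFire;
      }
    }
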
 
diff --git a/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java b/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java
index 1a613f7..4470ae7 100644
--- a/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java
+++ b/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java
@@ -197,14 +197,14 @@ public class ContainerLaunchUtil {
     if (executionEnvContainerId != null) {
       log.info("Got execution environment container id: {}", executionEnvContainerId);
       return new ContainerHeartbeatMonitor(() -> {
-          try {
-            container.shutdown();
-            containerRunnerException = new SamzaException("Container shutdown due to expired heartbeat");
-          } catch (Exception e) {
-            log.error("Heartbeat monitor failed to shutdown the container gracefully. Exiting process.", e);
-            System.exit(1);
-          }
-        }, new ContainerHeartbeatClient(coordinatorUrl, executionEnvContainerId));
+        try {
+          container.shutdown();
+          containerRunnerException = new SamzaException("Container shutdown due to expired heartbeat");
+        } catch (Exception e) {
+          log.error("Heartbeat monitor failed to shutdown the container gracefully. Exiting process.", e);
+          System.exit(1);
+        }
+      }, new ContainerHeartbeatClient(coordinatorUrl, executionEnvContainerId));
     } else {
       log.warn("Execution environment container id not set. Container heartbeat monitor will not be created");
       return null;
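
The heartbeat callback above attempts a graceful container shutdown and escalates to killing the process if that throws. The callback in isolation, with Container as a hypothetical stand-in (the real Runnable is handed to Samza's ContainerHeartbeatMonitor):

    public class HeartbeatShutdown {
      interface Container { void shutdown() throws Exception; }

      static Runnable onHeartbeatExpired(Container container) {
        return () -> {
          try {
            container.shutdown();  // preferred: orderly shutdown
          } catch (Exception e) {
            // last resort: the process must not outlive its heartbeat
            System.err.println("Graceful shutdown failed, exiting process: " + e);
            System.exit(1);
          }
        };
      }
    }
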
diff --git a/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java b/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java
index 8c3c029..37b2c0b 100644
--- a/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java
+++ b/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java
@@ -226,15 +226,15 @@ public class LocalApplicationRunner implements ApplicationRunner {
         throw new SamzaException("No jobs to run.");
       }
       jobConfigs.forEach(jobConfig -> {
-          LOG.debug("Starting job {} StreamProcessor with config {}", jobConfig.getName(), jobConfig);
-          MetadataStore coordinatorStreamStore = createCoordinatorStreamStore(jobConfig);
-          if (coordinatorStreamStore != null) {
-            coordinatorStreamStore.init();
-          }
-          StreamProcessor processor = createStreamProcessor(jobConfig, appDesc,
-              sp -> new LocalStreamProcessorLifecycleListener(sp, jobConfig), Optional.ofNullable(externalContext), coordinatorStreamStore);
-          processors.add(Pair.of(processor, coordinatorStreamStore));
-        });
+        LOG.debug("Starting job {} StreamProcessor with config {}", jobConfig.getName(), jobConfig);
+        MetadataStore coordinatorStreamStore = createCoordinatorStreamStore(jobConfig);
+        if (coordinatorStreamStore != null) {
+          coordinatorStreamStore.init();
+        }
+        StreamProcessor processor = createStreamProcessor(jobConfig, appDesc,
+          sp -> new LocalStreamProcessorLifecycleListener(sp, jobConfig), Optional.ofNullable(externalContext), coordinatorStreamStore);
+        processors.add(Pair.of(processor, coordinatorStreamStore));
+      });
       numProcessorsToStart.set(processors.size());
 
       // start the StreamProcessors
@@ -251,13 +251,13 @@ public class LocalApplicationRunner implements ApplicationRunner {
   @Override
   public void kill() {
     processors.forEach(sp -> {
-        sp.getLeft().stop();    // Stop StreamProcessor
+      sp.getLeft().stop();    // Stop StreamProcessor
 
-        // Coordinator stream isn't required so a null check is necessary
-        if (sp.getRight() != null) {
-          sp.getRight().close();  // Close associated coordinator metadata store
-        }
-      });
+      // Coordinator stream isn't required so a null check is necessary
+      if (sp.getRight() != null) {
+        sp.getRight().close();  // Close associated coordinator metadata store
+      }
+    });
     cleanup();
   }
 
@@ -448,9 +448,9 @@ public class LocalApplicationRunner implements ApplicationRunner {
       if (failure.compareAndSet(null, t)) {
         // shutdown the other processors
         processors.forEach(sp -> {
-            sp.getLeft().stop();    // Stop StreamProcessor
-            sp.getRight().close();  // Close associated coordinator metadata store
-          });
+          sp.getLeft().stop();    // Stop StreamProcessor
+          sp.getRight().close();  // Close associated coordinator metadata store
+        });
       }
 
       // handle the current processor's shutdown failure.
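
run() and kill() above keep each StreamProcessor paired with its coordinator metadata store, which may be null, so stopping the processor and closing the store always travel together. The registry shape, assuming commons-lang3's Pair and hypothetical Processor/Store interfaces:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.commons.lang3.tuple.Pair;

    public class ProcessorRegistry {
      interface Processor { void stop(); }
      interface Store { void close(); }

      private final List<Pair<Processor, Store>> processors = new ArrayList<>();

      void register(Processor p, Store s) {
        processors.add(Pair.of(p, s));
      }

      void killAll() {
        processors.forEach(pair -> {
          pair.getLeft().stop();          // stop the processor first
          if (pair.getRight() != null) {  // the store is optional, guard the close
            pair.getRight().close();
          }
        });
      }
    }
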
diff --git a/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java b/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java
index 3af5db1..bb4ea18 100644
--- a/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java
+++ b/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java
@@ -77,10 +77,10 @@ public class RemoteApplicationRunner implements ApplicationRunner {
 
       // 3. submit jobs for remote execution
       jobConfigs.forEach(jobConfig -> {
-          LOG.info("Starting job {} with config {}", jobConfig.getName(), jobConfig);
-          JobRunner runner = new JobRunner(jobConfig);
-          runner.run(true);
-        });
+        LOG.info("Starting job {} with config {}", jobConfig.getName(), jobConfig);
+        JobRunner runner = new JobRunner(jobConfig);
+        runner.run(true);
+      });
     } catch (Throwable t) {
       throw new SamzaException("Failed to run application", t);
     }
diff --git a/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java b/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java
index 4b1e281..4e4bdf1 100644
--- a/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java
+++ b/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java
@@ -88,13 +88,13 @@ public class EpochTimeScheduler {
 
     final long delay = timestamp - System.currentTimeMillis();
     final ScheduledFuture<?> scheduledFuture = executor.schedule(() -> {
-        scheduledFutures.remove(key);
-        readyTimers.put(TimerKey.of(key, timestamp), callback);
+      scheduledFutures.remove(key);
+      readyTimers.put(TimerKey.of(key, timestamp), callback);
 
-        if (timerListener != null) {
-          timerListener.onTimer();
-        }
-      }, delay > 0 ? delay : 0, TimeUnit.MILLISECONDS);
+      if (timerListener != null) {
+        timerListener.onTimer();
+      }
+    }, delay > 0 ? delay : 0, TimeUnit.MILLISECONDS);
     scheduledFutures.put(key, scheduledFuture);
   }
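
setTimer above turns an epoch timestamp into a delay, clamps negative delays to zero so timestamps in the past fire immediately, and has the fired callback deregister its own future. A minimal version with a plain ScheduledExecutorService and String keys:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class EpochScheduler {
      private final ScheduledExecutorService executor =
          Executors.newSingleThreadScheduledExecutor();
      private final Map<String, ScheduledFuture<?>> scheduledFutures = new ConcurrentHashMap<>();

      void setTimer(String key, long epochMillis, Runnable callback) {
        long delay = epochMillis - System.currentTimeMillis();
        ScheduledFuture<?> future = executor.schedule(() -> {
          scheduledFutures.remove(key);  // the timer fired, forget its handle
          callback.run();
        }, Math.max(delay, 0), TimeUnit.MILLISECONDS);
        scheduledFutures.put(key, future);
      }

      boolean cancel(String key) {
        ScheduledFuture<?> future = scheduledFutures.remove(key);
        return future != null && future.cancel(false);
      }
    }
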
 
diff --git a/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java b/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java
index 4eed058..5eda128 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java
@@ -74,12 +74,12 @@ public class ChangelogStreamManager {
     LOG.debug("Reading changelog partition information");
     final Map<TaskName, Integer> changelogMapping = new HashMap<>();
     metadataStore.all().forEach((taskName, partitionIdAsBytes) -> {
-        String partitionId = valueSerde.fromBytes(partitionIdAsBytes);
-        LOG.debug("TaskName: {} is mapped to {}", taskName, partitionId);
-        if (StringUtils.isNotBlank(partitionId)) {
-          changelogMapping.put(new TaskName(taskName), Integer.valueOf(partitionId));
-        }
-      });
+      String partitionId = valueSerde.fromBytes(partitionIdAsBytes);
+      LOG.debug("TaskName: {} is mapped to {}", taskName, partitionId);
+      if (StringUtils.isNotBlank(partitionId)) {
+        changelogMapping.put(new TaskName(taskName), Integer.valueOf(partitionId));
+      }
+    });
     return changelogMapping;
   }
 
@@ -129,47 +129,47 @@ public class ChangelogStreamManager {
     StorageConfig storageConfig = new StorageConfig(config);
     ImmutableMap.Builder<String, SystemStream> storeNameSystemStreamMapBuilder = new ImmutableMap.Builder<>();
     storageConfig.getStoreNames().forEach(storeName -> {
-        Optional<String> changelogStream = storageConfig.getChangelogStream(storeName);
-        if (changelogStream.isPresent() && StringUtils.isNotBlank(changelogStream.get())) {
-          storeNameSystemStreamMapBuilder.put(storeName, StreamUtil.getSystemStreamFromNames(changelogStream.get()));
-        }
-      });
+      Optional<String> changelogStream = storageConfig.getChangelogStream(storeName);
+      if (changelogStream.isPresent() && StringUtils.isNotBlank(changelogStream.get())) {
+        storeNameSystemStreamMapBuilder.put(storeName, StreamUtil.getSystemStreamFromNames(changelogStream.get()));
+      }
+    });
     Map<String, SystemStream> storeNameSystemStreamMapping = storeNameSystemStreamMapBuilder.build();
 
     // Get SystemAdmin for changelog store's system and attempt to create the stream
     SystemConfig systemConfig = new SystemConfig(config);
     storeNameSystemStreamMapping.forEach((storeName, systemStream) -> {
-        // Load system admin for this system.
-        SystemAdmin systemAdmin = systemConfig.getSystemAdmin(systemStream.getSystem());
-
-        if (systemAdmin == null) {
-          throw new SamzaException(String.format(
-              "Error creating changelog. Changelog on store %s uses system %s, which is missing from the configuration.",
-              storeName, systemStream.getSystem()));
-        }
-
-        StreamSpec changelogSpec =
-            StreamSpec.createChangeLogStreamSpec(systemStream.getStream(), systemStream.getSystem(),
-                maxChangeLogStreamPartitions);
-
-        systemAdmin.start();
-
-        if (systemAdmin.createStream(changelogSpec)) {
-          LOG.info(String.format("created changelog stream %s.", systemStream.getStream()));
-        } else {
-          LOG.info(String.format("changelog stream %s already exists.", systemStream.getStream()));
-        }
-        systemAdmin.validateStream(changelogSpec);
-
-        if (storageConfig.getAccessLogEnabled(storeName)) {
-          String accesslogStream = storageConfig.getAccessLogStream(systemStream.getStream());
-          StreamSpec accesslogSpec =
-              new StreamSpec(accesslogStream, accesslogStream, systemStream.getSystem(), maxChangeLogStreamPartitions);
-          systemAdmin.createStream(accesslogSpec);
-          systemAdmin.validateStream(accesslogSpec);
-        }
-
-        systemAdmin.stop();
-      });
+      // Load system admin for this system.
+      SystemAdmin systemAdmin = systemConfig.getSystemAdmin(systemStream.getSystem());
+
+      if (systemAdmin == null) {
+        throw new SamzaException(String.format(
+            "Error creating changelog. Changelog on store %s uses system %s, which is missing from the configuration.",
+            storeName, systemStream.getSystem()));
+      }
+
+      StreamSpec changelogSpec =
+          StreamSpec.createChangeLogStreamSpec(systemStream.getStream(), systemStream.getSystem(),
+              maxChangeLogStreamPartitions);
+
+      systemAdmin.start();
+
+      if (systemAdmin.createStream(changelogSpec)) {
+        LOG.info(String.format("created changelog stream %s.", systemStream.getStream()));
+      } else {
+        LOG.info(String.format("changelog stream %s already exists.", systemStream.getStream()));
+      }
+      systemAdmin.validateStream(changelogSpec);
+
+      if (storageConfig.getAccessLogEnabled(storeName)) {
+        String accesslogStream = storageConfig.getAccessLogStream(systemStream.getStream());
+        StreamSpec accesslogSpec =
+            new StreamSpec(accesslogStream, accesslogStream, systemStream.getSystem(), maxChangeLogStreamPartitions);
+        systemAdmin.createStream(accesslogSpec);
+        systemAdmin.validateStream(accesslogSpec);
+      }
+
+      systemAdmin.stop();
+    });
   }
 }
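
The per-store block above is an idempotent create-then-validate: createStream reports whether the stream was newly created, and validateStream runs in both branches so a pre-existing but misconfigured stream still fails fast. The flow with Admin as a hypothetical stand-in for SystemAdmin:

    public class ChangelogCreation {
      interface Admin {
        boolean createStream(String spec);   // false if the stream already exists
        void validateStream(String spec);    // throws if the stream is misconfigured
      }

      static void ensureStream(Admin admin, String spec) {
        if (admin.createStream(spec)) {
          System.out.println("created changelog stream " + spec);
        } else {
          System.out.println("changelog stream " + spec + " already exists");
        }
        admin.validateStream(spec);  // validate regardless of who created it
      }
    }
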
diff --git a/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java b/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java
index d2f0097..44dd59a 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java
@@ -127,37 +127,37 @@ class NonTransactionalStateTaskRestoreManager implements TaskRestoreManager {
 
     FileUtil fileUtil = new FileUtil();
     taskStores.forEach((storeName, storageEngine) -> {
-        if (!storageEngine.getStoreProperties().isLoggedStore()) {
-          File nonLoggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
-          LOG.info("Got non logged storage partition directory as " + nonLoggedStorePartitionDir.toPath().toString());
-
-          if (nonLoggedStorePartitionDir.exists()) {
-            LOG.info("Deleting non logged storage partition directory " + nonLoggedStorePartitionDir.toPath().toString());
-            fileUtil.rm(nonLoggedStorePartitionDir);
-          }
+      if (!storageEngine.getStoreProperties().isLoggedStore()) {
+        File nonLoggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        LOG.info("Got non logged storage partition directory as " + nonLoggedStorePartitionDir.toPath().toString());
+
+        if (nonLoggedStorePartitionDir.exists()) {
+          LOG.info("Deleting non logged storage partition directory " + nonLoggedStorePartitionDir.toPath().toString());
+          fileUtil.rm(nonLoggedStorePartitionDir);
+        }
+      } else {
+        File loggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        LOG.info("Got logged storage partition directory as " + loggedStorePartitionDir.toPath().toString());
+
+        // Delete the logged store if it is not valid.
+        if (!isLoggedStoreValid(storeName, loggedStorePartitionDir) || storageConfig.getCleanLoggedStoreDirsOnStart(storeName)) {
+          LOG.info("Deleting logged storage partition directory " + loggedStorePartitionDir.toPath().toString());
+          fileUtil.rm(loggedStorePartitionDir);
         } else {
-          File loggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
-          LOG.info("Got logged storage partition directory as " + loggedStorePartitionDir.toPath().toString());
-
-          // Delete the logged store if it is not valid.
-          if (!isLoggedStoreValid(storeName, loggedStorePartitionDir) || storageConfig.getCleanLoggedStoreDirsOnStart(storeName)) {
-            LOG.info("Deleting logged storage partition directory " + loggedStorePartitionDir.toPath().toString());
-            fileUtil.rm(loggedStorePartitionDir);
-          } else {
-
-            SystemStreamPartition changelogSSP = new SystemStreamPartition(changelogSystemStreams.get(storeName), taskModel.getChangelogPartition());
-            Map<SystemStreamPartition, String> offset =
-                storageManagerUtil.readOffsetFile(loggedStorePartitionDir, Collections.singleton(changelogSSP), false);
-            LOG.info("Read offset {} for the store {} from logged storage partition directory {}", offset, storeName, loggedStorePartitionDir);
-
-            if (offset.containsKey(changelogSSP)) {
-              fileOffsets.put(changelogSSP, offset.get(changelogSSP));
-            }
+
+          SystemStreamPartition changelogSSP = new SystemStreamPartition(changelogSystemStreams.get(storeName), taskModel.getChangelogPartition());
+          Map<SystemStreamPartition, String> offset =
+              storageManagerUtil.readOffsetFile(loggedStorePartitionDir, Collections.singleton(changelogSSP), false);
+          LOG.info("Read offset {} for the store {} from logged storage partition directory {}", offset, storeName, loggedStorePartitionDir);
+
+          if (offset.containsKey(changelogSSP)) {
+            fileOffsets.put(changelogSSP, offset.get(changelogSSP));
           }
         }
-      });
+      }
+    });
   }
 
   /**
@@ -188,25 +188,25 @@ class NonTransactionalStateTaskRestoreManager implements TaskRestoreManager {
   private void setupBaseDirs() {
     LOG.debug("Setting up base directories for stores.");
     taskStores.forEach((storeName, storageEngine) -> {
-        if (storageEngine.getStoreProperties().isLoggedStore()) {
+      if (storageEngine.getStoreProperties().isLoggedStore()) {
 
-          File loggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        File loggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
 
-          LOG.info("Using logged storage partition directory: " + loggedStorePartitionDir.toPath().toString()
-              + " for store: " + storeName);
+        LOG.info("Using logged storage partition directory: " + loggedStorePartitionDir.toPath().toString()
+            + " for store: " + storeName);
 
-          if (!loggedStorePartitionDir.exists()) {
-            loggedStorePartitionDir.mkdirs();
-          }
-        } else {
-          File nonLoggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
-          LOG.info("Using non logged storage partition directory: " + nonLoggedStorePartitionDir.toPath().toString()
-              + " for store: " + storeName);
-          nonLoggedStorePartitionDir.mkdirs();
+        if (!loggedStorePartitionDir.exists()) {
+          loggedStorePartitionDir.mkdirs();
         }
-      });
+      } else {
+        File nonLoggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        LOG.info("Using non logged storage partition directory: " + nonLoggedStorePartitionDir.toPath().toString()
+            + " for store: " + storeName);
+        nonLoggedStorePartitionDir.mkdirs();
+      }
+    });
   }
 
   /**
@@ -336,13 +336,13 @@ class NonTransactionalStateTaskRestoreManager implements TaskRestoreManager {
   public void stopPersistentStores() {
 
     Map<String, StorageEngine> persistentStores = this.taskStores.entrySet().stream().filter(e -> {
-        return e.getValue().getStoreProperties().isPersistedToDisk();
-      }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+      return e.getValue().getStoreProperties().isPersistedToDisk();
+    }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
 
     persistentStores.forEach((storeName, storageEngine) -> {
-        storageEngine.stop();
-        this.taskStores.remove(storeName);
-      });
+      storageEngine.stop();
+      this.taskStores.remove(storeName);
+    });
     LOG.info("Stopped persistent stores {}", persistentStores);
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java b/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java
index 5d34176..e292df9 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java
@@ -116,15 +116,15 @@ public class StorageRecovery {
 
     systemAdmins.start();
     this.containerStorageManagers.forEach((containerName, containerStorageManager) -> {
-        try {
-          containerStorageManager.start();
-        } catch (InterruptedException e) {
-          // we can ignore the exception since its only used in the context of a command line tool and bubbling the
-          // exception upstream isn't needed.
-          LOG.warn("Received an interrupt during store restoration for container {}."
-              + " Proceeding with the next container", containerName);
-        }
-      });
+      try {
+        containerStorageManager.start();
+      } catch (InterruptedException e) {
+        // we can ignore the exception since it's only used in the context of a command line tool and bubbling the
+        // exception upstream isn't needed.
+        LOG.warn("Received an interrupt during store restoration for container {}."
+            + " Proceeding with the next container", containerName);
+      }
+    });
     this.containerStorageManagers.forEach((containerName, containerStorageManager) -> containerStorageManager.shutdown());
     systemAdmins.stop();
 
@@ -201,13 +201,13 @@ public class StorageRecovery {
     // Adding all serdes from factories
     serializerConfig.getSerdeNames()
         .forEach(serdeName -> {
-            String serdeClassName = serializerConfig.getSerdeFactoryClass(serdeName)
-              .orElseGet(() -> SerializerConfig.getPredefinedSerdeFactoryName(serdeName));
-            @SuppressWarnings("unchecked")
-            Serde<Object> serde =
-                ReflectionUtil.getObj(serdeClassName, SerdeFactory.class).getSerde(serdeName, serializerConfig);
-            serdeMap.put(serdeName, serde);
-          });
+          String serdeClassName = serializerConfig.getSerdeFactoryClass(serdeName)
+            .orElseGet(() -> SerializerConfig.getPredefinedSerdeFactoryName(serdeName));
+          @SuppressWarnings("unchecked")
+          Serde<Object> serde =
+              ReflectionUtil.getObj(serdeClassName, SerdeFactory.class).getSerde(serdeName, serializerConfig);
+          serdeMap.put(serdeName, serde);
+        });
 
     return serdeMap;
   }
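
The serde map above is assembled reflectively from configured factory class names. The load-instantiate-build flow, sketched here with Class.forName directly; SerdeFactory is a hypothetical one-method interface, while the real code goes through Samza's ReflectionUtil and SerdeFactory types:

    public class SerdeLoader {
      interface SerdeFactory { Object getSerde(String serdeName); }

      static Object loadSerde(String factoryClassName, String serdeName) {
        try {
          SerdeFactory factory = (SerdeFactory) Class.forName(factoryClassName)
              .getDeclaredConstructor().newInstance();
          return factory.getSerde(serdeName);
        } catch (ReflectiveOperationException e) {
          throw new IllegalArgumentException(
              "Could not load serde factory: " + factoryClassName, e);
        }
      }
    }
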
diff --git a/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputHandler.java b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputHandler.java
index 56aeb85..7ab4036 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputHandler.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputHandler.java
@@ -79,14 +79,14 @@ public class TaskSideInputHandler {
 
     this.sspToStores = new HashMap<>();
     storeToSSPs.forEach((store, ssps) -> {
-        for (SystemStreamPartition ssp: ssps) {
-          this.sspToStores.computeIfAbsent(ssp, key -> new HashSet<>());
-          this.sspToStores.computeIfPresent(ssp, (key, value) -> {
-              value.add(store);
-              return value;
-            });
-        }
-      });
+      for (SystemStreamPartition ssp: ssps) {
+        this.sspToStores.computeIfAbsent(ssp, key -> new HashSet<>());
+        this.sspToStores.computeIfPresent(ssp, (key, value) -> {
+          value.add(store);
+          return value;
+        });
+      }
+    });
 
     this.taskSideInputStorageManager = new TaskSideInputStorageManager(taskName,
         taskMode,
@@ -207,13 +207,13 @@ public class TaskSideInputHandler {
     Map<SystemStreamPartition, String> startingOffsets = new HashMap<>();
 
     this.sspToStores.keySet().forEach(ssp -> {
-        String fileOffset = fileOffsets.get(ssp);
-        String oldestOffset = oldestOffsets.get(ssp);
+      String fileOffset = fileOffsets.get(ssp);
+      String oldestOffset = oldestOffsets.get(ssp);
 
-        startingOffsets.put(ssp,
-            this.storageManagerUtil.getStartingOffset(
-                ssp, this.systemAdmins.getSystemAdmin(ssp.getSystem()), fileOffset, oldestOffset));
-      });
+      startingOffsets.put(ssp,
+          this.storageManagerUtil.getStartingOffset(
+              ssp, this.systemAdmins.getSystemAdmin(ssp.getSystem()), fileOffset, oldestOffset));
+    });
 
     return startingOffsets;
   }
@@ -244,17 +244,17 @@ public class TaskSideInputHandler {
     // Step 3
     metadata.forEach((systemStream, systemStreamMetadata) -> {
 
-        // get the partition metadata for each system stream
-        Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata =
-            systemStreamMetadata.getSystemStreamPartitionMetadata();
+      // get the partition metadata for each system stream
+      Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata =
+          systemStreamMetadata.getSystemStreamPartitionMetadata();
 
-        // For SSPs belonging to the system stream, use the partition metadata to get the oldest offset
-        // if partitionMetadata was not obtained for any SSP, populate oldest-offset as null
-        // Because of https://bugs.openjdk.java.net/browse/JDK-8148463 using lambda will NPE when getOldestOffset() is null
-        for (SystemStreamPartition ssp : systemStreamToSsp.get(systemStream)) {
-          oldestOffsets.put(ssp, partitionMetadata.get(ssp.getPartition()).getOldestOffset());
-        }
-      });
+      // For SSPs belonging to the system stream, use the partition metadata to get the oldest offset
+      // if partitionMetadata was not obtained for any SSP, populate oldest-offset as null
+      // Because of https://bugs.openjdk.java.net/browse/JDK-8148463 using lambda will NPE when getOldestOffset() is null
+      for (SystemStreamPartition ssp : systemStreamToSsp.get(systemStream)) {
+        oldestOffsets.put(ssp, partitionMetadata.get(ssp.getPartition()).getOldestOffset());
+      }
+    });
 
     return oldestOffsets;
   }
@@ -264,10 +264,10 @@ public class TaskSideInputHandler {
    */
   private void validateProcessorConfiguration(Set<String> stores, Map<String, SideInputsProcessor> storeToProcessor) {
     stores.forEach(storeName -> {
-        if (!storeToProcessor.containsKey(storeName)) {
-          throw new SamzaException(
-              String.format("Side inputs processor missing for store: %s.", storeName));
-        }
-      });
+      if (!storeToProcessor.containsKey(storeName)) {
+        throw new SamzaException(
+            String.format("Side inputs processor missing for store: %s.", storeName));
+      }
+    });
   }
 }
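
The constructor hunk above inverts the store-to-partitions map into a partition-to-stores map. Since computeIfAbsent already returns the new or existing value, the computeIfAbsent plus computeIfPresent pair can be collapsed into a single call, as in this simplified version:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class SideInputIndex {
      static Map<String, Set<String>> invert(Map<String, Set<String>> storeToPartitions) {
        Map<String, Set<String>> partitionToStores = new HashMap<>();
        storeToPartitions.forEach((store, partitions) -> {
          for (String partition : partitions) {
            // computeIfAbsent returns the set, so one call both creates and appends
            partitionToStores.computeIfAbsent(partition, k -> new HashSet<>()).add(store);
          }
        });
        return partitionToStores;
      }
    }
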
diff --git a/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java
index 0a4e763..c93e0b3 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java
@@ -125,18 +125,18 @@ public class TaskSideInputStorageManager {
     LOG.info("Initializing side input store directories.");
 
     stores.keySet().forEach(storeName -> {
-        File storeLocation = getStoreLocation(storeName);
-        String storePath = storeLocation.toPath().toString();
-        if (!isValidSideInputStore(storeName, storeLocation)) {
-          LOG.info("Cleaning up the store directory at {} for {}", storePath, storeName);
-          new FileUtil().rm(storeLocation);
-        }
-
-        if (isPersistedStore(storeName) && !storeLocation.exists()) {
-          LOG.info("Creating {} as the store directory for the side input store {}", storePath, storeName);
-          storeLocation.mkdirs();
-        }
-      });
+      File storeLocation = getStoreLocation(storeName);
+      String storePath = storeLocation.toPath().toString();
+      if (!isValidSideInputStore(storeName, storeLocation)) {
+        LOG.info("Cleaning up the store directory at {} for {}", storePath, storeName);
+        new FileUtil().rm(storeLocation);
+      }
+
+      if (isPersistedStore(storeName) && !storeLocation.exists()) {
+        LOG.info("Creating {} as the store directory for the side input store {}", storePath, storeName);
+        storeLocation.mkdirs();
+      }
+    });
   }
 
   /**
@@ -149,18 +149,18 @@ public class TaskSideInputStorageManager {
     storeToSSps.entrySet().stream()
         .filter(entry -> isPersistedStore(entry.getKey())) // filter out in-memory side input stores
         .forEach((entry) -> {
-            String storeName = entry.getKey();
-            Map<SystemStreamPartition, String> offsets = entry.getValue().stream()
-              .filter(lastProcessedOffsets::containsKey)
-              .collect(Collectors.toMap(Function.identity(), lastProcessedOffsets::get));
-
-            try {
-              File taskStoreDir = storageManagerUtil.getTaskStoreDir(storeBaseDir, storeName, taskName, taskMode);
-              storageManagerUtil.writeOffsetFile(taskStoreDir, offsets, true);
-            } catch (Exception e) {
-              throw new SamzaException("Failed to write offset file for side input store: " + storeName, e);
-            }
-          });
+          String storeName = entry.getKey();
+          Map<SystemStreamPartition, String> offsets = entry.getValue().stream()
+            .filter(lastProcessedOffsets::containsKey)
+            .collect(Collectors.toMap(Function.identity(), lastProcessedOffsets::get));
+
+          try {
+            File taskStoreDir = storageManagerUtil.getTaskStoreDir(storeBaseDir, storeName, taskName, taskMode);
+            storageManagerUtil.writeOffsetFile(taskStoreDir, offsets, true);
+          } catch (Exception e) {
+            throw new SamzaException("Failed to write offset file for side input store: " + storeName, e);
+          }
+        });
   }
 
   /**
@@ -173,20 +173,20 @@ public class TaskSideInputStorageManager {
     Map<SystemStreamPartition, String> fileOffsets = new HashMap<>();
 
     stores.keySet().forEach(storeName -> {
-        LOG.debug("Reading local offsets for store: {}", storeName);
+      LOG.debug("Reading local offsets for store: {}", storeName);
 
-        File storeLocation = getStoreLocation(storeName);
-        if (isValidSideInputStore(storeName, storeLocation)) {
-          try {
+      File storeLocation = getStoreLocation(storeName);
+      if (isValidSideInputStore(storeName, storeLocation)) {
+        try {
 
-            Map<SystemStreamPartition, String> offsets =
-                storageManagerUtil.readOffsetFile(storeLocation, storeToSSps.get(storeName), true);
-            fileOffsets.putAll(offsets);
-          } catch (Exception e) {
-            LOG.warn("Failed to load the offset file for side input store:" + storeName, e);
-          }
+          Map<SystemStreamPartition, String> offsets =
+              storageManagerUtil.readOffsetFile(storeLocation, storeToSSps.get(storeName), true);
+          fileOffsets.putAll(offsets);
+        } catch (Exception e) {
+          LOG.warn("Failed to load the offset file for side input store:" + storeName, e);
         }
-      });
+      }
+    });
 
     return fileOffsets;
   }
@@ -211,10 +211,10 @@ public class TaskSideInputStorageManager {
 
   private void validateStoreConfiguration(Map<String, StorageEngine> stores) {
     stores.forEach((storeName, storageEngine) -> {
-        if (storageEngine.getStoreProperties().isLoggedStore()) {
-          throw new SamzaException(
-              String.format("Cannot configure both side inputs and a changelog for store: %s.", storeName));
-        }
-      });
+      if (storageEngine.getStoreProperties().isLoggedStore()) {
+        throw new SamzaException(
+            String.format("Cannot configure both side inputs and a changelog for store: %s.", storeName));
+      }
+    });
   }
 }
\ No newline at end of file
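
writeFileOffsets above narrows each persisted store's partitions to those with a recorded last-processed offset before writing the store's offset file, wrapping any failure per store. A sketch over String keys, with OffsetWriter standing in for StorageManagerUtil#writeOffsetFile:

    import java.util.Map;
    import java.util.Set;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    public class OffsetFlush {
      interface OffsetWriter {
        void write(String storeName, Map<String, String> offsets) throws Exception;
      }

      static void flush(Map<String, Set<String>> storeToPartitions,
          Map<String, String> lastProcessedOffsets, OffsetWriter writer) {
        storeToPartitions.forEach((storeName, partitions) -> {
          Map<String, String> offsets = partitions.stream()
              .filter(lastProcessedOffsets::containsKey)  // skip never-processed partitions
              .collect(Collectors.toMap(Function.identity(), lastProcessedOffsets::get));
          try {
            writer.write(storeName, offsets);
          } catch (Exception e) {
            throw new RuntimeException(
                "Failed to write offset file for side input store: " + storeName, e);
          }
        });
      }
    }
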
diff --git a/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java b/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java
index c578d9a..e4633b7 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java
@@ -144,10 +144,10 @@ public class TransactionalStateTaskRestoreManager implements TaskRestoreManager
   public void stopPersistentStores() {
     TaskName taskName = taskModel.getTaskName();
     storeEngines.forEach((storeName, storeEngine) -> {
-        if (storeEngine.getStoreProperties().isPersistedToDisk())
-          storeEngine.stop();
-        LOG.info("Stopped persistent store: {} in task: {}", storeName, taskName);
-      });
+      if (storeEngine.getStoreProperties().isPersistedToDisk())
+        storeEngine.stop();
+      LOG.info("Stopped persistent store: {} in task: {}", storeName, taskName);
+    });
   }
 
   /**
@@ -208,210 +208,210 @@ public class TransactionalStateTaskRestoreManager implements TaskRestoreManager
     Map<String, RestoreOffsets> storesToRestore = new HashMap<>();
 
     storeEngines.forEach((storeName, storageEngine) -> {
-        // do nothing if store is non persistent and not logged (e.g. in memory cache only)
-        if (!storageEngine.getStoreProperties().isPersistedToDisk() &&
+      // do nothing if store is non persistent and not logged (e.g. in memory cache only)
+      if (!storageEngine.getStoreProperties().isPersistedToDisk() &&
+        !storageEngine.getStoreProperties().isLoggedStore()) {
+        return;
+      }
+
+      // persistent but non-logged stores are always deleted
+      if (storageEngine.getStoreProperties().isPersistedToDisk() &&
           !storageEngine.getStoreProperties().isLoggedStore()) {
-          return;
-        }
-
-        // persistent but non-logged stores are always deleted
-        if (storageEngine.getStoreProperties().isPersistedToDisk() &&
-            !storageEngine.getStoreProperties().isLoggedStore()) {
-          File currentDir = storageManagerUtil.getTaskStoreDir(
-              nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
-          LOG.info("Marking current directory: {} for store: {} in task: {} for deletion since it is not a logged store.",
-              currentDir, storeName, taskName);
-          storeDirsToDelete.put(storeName, currentDir);
-          // persistent but non-logged stores should not have checkpoint dirs
-          return;
-        }
-
-        // get the oldest and newest current changelog SSP offsets as well as the checkpointed changelog SSP offset
-        SystemStream changelog = storeChangelogs.get(storeName);
-        SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
-        SystemAdmin admin = systemAdmins.getSystemAdmin(changelogSSP.getSystem());
-        SystemStreamPartitionMetadata changelogSSPMetadata = currentChangelogOffsets.get(changelogSSP);
-        String oldestOffset = changelogSSPMetadata.getOldestOffset();
-        String newestOffset = changelogSSPMetadata.getNewestOffset();
-
-        String checkpointMessage = checkpointedChangelogOffsets.get(changelogSSP);
-        String checkpointedOffset = null;  // can be null if no message, or message has null offset
-        long timeSinceLastCheckpointInMs = Long.MAX_VALUE;
-        if (StringUtils.isNotBlank(checkpointMessage)) {
-          CheckpointedChangelogOffset checkpointedChangelogOffset = CheckpointedChangelogOffset.fromString(checkpointMessage);
-          checkpointedOffset = checkpointedChangelogOffset.getOffset();
-          timeSinceLastCheckpointInMs = System.currentTimeMillis() -
-              checkpointedChangelogOffset.getCheckpointId().getMillis();
-        }
-
-        // if the clean.store.start config is set, delete current and checkpoint dirs, restore from oldest offset to checkpointed
-        if (storageEngine.getStoreProperties().isPersistedToDisk() && new StorageConfig(
-          config).getCleanLoggedStoreDirsOnStart(storeName)) {
-          File currentDir = storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskName, taskMode);
-          LOG.info("Marking current directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
-              currentDir, storeName, taskName);
-          storeDirsToDelete.put(storeName, currentDir);
-
-          storageManagerUtil.getTaskStoreCheckpointDirs(loggedStoreBaseDirectory, storeName, taskName, taskMode)
-              .forEach(checkpointDir -> {
-                  LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
-                      checkpointDir, storeName, taskName);
-                  storeDirsToDelete.put(storeName, checkpointDir);
-                });
-
-          LOG.info("Marking restore offsets for store: {} in task: {} to {}, {} ", storeName, taskName, oldestOffset, checkpointedOffset);
-          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
-          return;
-        }
-
-        Optional<File> currentDirOptional;
-        Optional<List<File>> checkpointDirsOptional;
+        File currentDir = storageManagerUtil.getTaskStoreDir(
+            nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
+        LOG.info("Marking current directory: {} for store: {} in task: {} for deletion since it is not a logged store.",
+            currentDir, storeName, taskName);
+        storeDirsToDelete.put(storeName, currentDir);
+        // persistent but non-logged stores should not have checkpoint dirs
+        return;
+      }
+
+      // get the oldest and newest current changelog SSP offsets as well as the checkpointed changelog SSP offset
+      SystemStream changelog = storeChangelogs.get(storeName);
+      SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
+      SystemAdmin admin = systemAdmins.getSystemAdmin(changelogSSP.getSystem());
+      SystemStreamPartitionMetadata changelogSSPMetadata = currentChangelogOffsets.get(changelogSSP);
+      String oldestOffset = changelogSSPMetadata.getOldestOffset();
+      String newestOffset = changelogSSPMetadata.getNewestOffset();
+
+      String checkpointMessage = checkpointedChangelogOffsets.get(changelogSSP);
+      String checkpointedOffset = null;  // can be null if no message, or message has null offset
+      long timeSinceLastCheckpointInMs = Long.MAX_VALUE;
+      if (StringUtils.isNotBlank(checkpointMessage)) {
+        CheckpointedChangelogOffset checkpointedChangelogOffset = CheckpointedChangelogOffset.fromString(checkpointMessage);
+        checkpointedOffset = checkpointedChangelogOffset.getOffset();
+        timeSinceLastCheckpointInMs = System.currentTimeMillis() -
+            checkpointedChangelogOffset.getCheckpointId().getMillis();
+      }
+
+      // if the clean.store.start config is set, delete current and checkpoint dirs, restore from oldest offset to checkpointed
+      if (storageEngine.getStoreProperties().isPersistedToDisk() && new StorageConfig(
+        config).getCleanLoggedStoreDirsOnStart(storeName)) {
+        File currentDir = storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskName, taskMode);
+        LOG.info("Marking current directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
+            currentDir, storeName, taskName);
+        storeDirsToDelete.put(storeName, currentDir);
+
+        storageManagerUtil.getTaskStoreCheckpointDirs(loggedStoreBaseDirectory, storeName, taskName, taskMode)
+            .forEach(checkpointDir -> {
+              LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
+                  checkpointDir, storeName, taskName);
+              storeDirsToDelete.put(storeName, checkpointDir);
+            });
+
+        LOG.info("Marking restore offsets for store: {} in task: {} to {}, {} ", storeName, taskName, oldestOffset, checkpointedOffset);
+        storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
+        return;
+      }
+
+      Optional<File> currentDirOptional;
+      Optional<List<File>> checkpointDirsOptional;
+
+      if (!storageEngine.getStoreProperties().isPersistedToDisk()) {
+        currentDirOptional = Optional.empty();
+        checkpointDirsOptional = Optional.empty();
+      } else {
+        currentDirOptional = Optional.of(storageManagerUtil.getTaskStoreDir(
+            loggedStoreBaseDirectory, storeName, taskName, taskMode));
+        checkpointDirsOptional = Optional.of(storageManagerUtil.getTaskStoreCheckpointDirs(
+            loggedStoreBaseDirectory, storeName, taskName, taskMode));
+      }
+
+      LOG.info("For store: {} in task: {} got current dir: {}, checkpoint dirs: {}, checkpointed changelog offset: {}",
+          storeName, taskName, currentDirOptional, checkpointDirsOptional, checkpointedOffset);
+
+      currentDirOptional.ifPresent(currentDir -> {
+        LOG.info("Marking current directory: {} for store: {} in task: {} for deletion.",
+            currentDir, storeName, taskName);
+        storeDirsToDelete.put(storeName, currentDir);
+      });
 
-        if (!storageEngine.getStoreProperties().isPersistedToDisk()) {
-          currentDirOptional = Optional.empty();
-          checkpointDirsOptional = Optional.empty();
+      if (checkpointedOffset == null && oldestOffset != null) {
+        // this can mean that either this is the initial migration for this feature and there are no previously
+        // checkpointed changelog offsets, or that this is a new store or changelog topic after the initial migration.
+
+        // if this is the first time migration, it might be desirable to retain existing data.
+        // if this is new store or topic, it is possible that the container previously died after writing some data to
+        // the changelog but before a commit, so it is desirable to delete the store, not restore anything and
+        // trim the changelog
+
+        // since we can't tell the difference b/w the two scenarios by just looking at the store and changelogs,
+        // we'll request users to indicate whether to retain existing data using a config flag. this flag should only
+        // be set during migrations, and turned off after the first successful commit of the new container (i.e. next
+        // deploy). for simplicity, we'll always delete the local store, and restore from changelog if necessary.
+
+        // the former scenario should not be common. the recommended way to opt-in to the transactional state feature
+        // is to first upgrade to the latest samza version but keep the transactional state restore config off.
+        // this will create the store checkpoint directories and write the changelog offset to the checkpoint, but
+        // will not use them during restore. once this is done (i.e. at least one commit after upgrade), the
+        // transactional state restore feature can be turned on on subsequent deploys. this code path exists as a
+        // fail-safe against clearing changelogs in case users do not follow upgrade instructions and enable the
+        // feature directly.
+        checkpointDirsOptional.ifPresent(checkpointDirs ->
+            checkpointDirs.forEach(checkpointDir -> {
+              LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since checkpointed " +
+                      "offset is null and oldest offset: {} is not.",
+                  checkpointDir, storeName, taskName, oldestOffset);
+              storeDirsToDelete.put(storeName, checkpointDir);
+            }));
+
+        if (new TaskConfig(config).getTransactionalStateRetainExistingState()) {
+          // mark for restore from (oldest, newest) to recreate local state.
+          LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is true, " +
+              "local state will be fully restored from current changelog contents. " +
+              "There is no transactional local state guarantee.", storeName, taskName);
+          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
         } else {
-          currentDirOptional = Optional.of(storageManagerUtil.getTaskStoreDir(
-              loggedStoreBaseDirectory, storeName, taskName, taskMode));
-          checkpointDirsOptional = Optional.of(storageManagerUtil.getTaskStoreCheckpointDirs(
-              loggedStoreBaseDirectory, storeName, taskName, taskMode));
+          LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is false, " +
+              "any local state and changelog topic contents will be deleted.", storeName, taskName);
+          // mark for restore from (oldest, null) to trim entire changelog.
+          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, null));
         }
-
-        LOG.info("For store: {} in task: {} got current dir: {}, checkpoint dirs: {}, checkpointed changelog offset: {}",
-            storeName, taskName, currentDirOptional, checkpointDirsOptional, checkpointedOffset);
-
-        currentDirOptional.ifPresent(currentDir -> {
-            LOG.info("Marking current directory: {} for store: {} in task: {} for deletion.",
-                currentDir, storeName, taskName);
-            storeDirsToDelete.put(storeName, currentDir);
-          });
-
-        if (checkpointedOffset == null && oldestOffset != null) {
-          // this can mean that either this is the initial migration for this feature and there are no previously
-          // checkpointed changelog offsets, or that this is a new store or changelog topic after the initial migration.
-
-          // if this is the first time migration, it might be desirable to retain existing data.
-          // if this is new store or topic, it is possible that the container previously died after writing some data to
-          // the changelog but before a commit, so it is desirable to delete the store, not restore anything and
-          // trim the changelog
-
-          // since we can't tell the difference b/w the two scenarios by just looking at the store and changelogs,
-          // we'll request users to indicate whether to retain existing data using a config flag. this flag should only
-          // be set during migrations, and turned off after the first successful commit of the new container (i.e. next
-          // deploy). for simplicity, we'll always delete the local store, and restore from changelog if necessary.
-
-          // the former scenario should not be common. the recommended way to opt-in to the transactional state feature
-          // is to first upgrade to the latest samza version but keep the transactional state restore config off.
-          // this will create the store checkpoint directories and write the changelog offset to the checkpoint, but
-          // will not use them during restore. once this is done (i.e. at least one commit after upgrade), the
-          // transactional state restore feature can be turned on on subsequent deploys. this code path exists as a
-          // fail-safe against clearing changelogs in case users do not follow upgrade instructions and enable the
-          // feature directly.
-          checkpointDirsOptional.ifPresent(checkpointDirs ->
-              checkpointDirs.forEach(checkpointDir -> {
-                  LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since checkpointed " +
-                          "offset is null and oldest offset: {} is not.",
-                      checkpointDir, storeName, taskName, oldestOffset);
-                  storeDirsToDelete.put(storeName, checkpointDir);
-                }));
-
-          if (new TaskConfig(config).getTransactionalStateRetainExistingState()) {
-            // mark for restore from (oldest, newest) to recreate local state.
-            LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is true, " +
-                "local state will be fully restored from current changelog contents. " +
-                "There is no transactional local state guarantee.", storeName, taskName);
-            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
+      } else if (// check if the checkpointed offset is out of range of current oldest and newest offsets
+          admin.offsetComparator(oldestOffset, checkpointedOffset) > 0 ||
+          admin.offsetComparator(checkpointedOffset, newestOffset) > 0) {
+        // checkpointed offset is out of range. this could mean that this is a TTL topic and the checkpointed
+        // offset was TTLd, or that the changelog topic was manually deleted and then recreated.
+        // we cannot guarantee transactional state for TTL stores, so delete everything and do a full restore
+        // for local store. if the topic was deleted and recreated, this will have the side effect of
+        // clearing the store as well.
+        LOG.warn("Checkpointed offset: {} for store: {} in task: {} is out of range of oldest: {} or newest: {} offset." +
+                "Deleting existing store and fully restoring from changelog topic from oldest to newest offset. If the topic " +
+                "has time-based retention, there is no transactional local state guarantees. If the topic was changed," +
+                "local state will be cleaned up and fully restored to match the new topic contents.",
+            checkpointedOffset, storeName, taskName, oldestOffset, newestOffset);
+        checkpointDirsOptional.ifPresent(checkpointDirs ->
+            checkpointDirs.forEach(checkpointDir -> storeDirsToDelete.put(storeName, checkpointDir)));
+        storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
+      } else { // happy path. checkpointed offset is in range of current oldest and newest offsets
+        if (!checkpointDirsOptional.isPresent()) { // non-persistent logged store
+          LOG.info("Did not find any checkpoint directories for logged (maybe non-persistent) store: {}. Local state " +
+              "will be fully restored from current changelog contents.", storeName);
+          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
+        } else { // persistent logged store
+          String targetOffset;
+
+          // check checkpoint time against min.compaction.lag.ms. if older, restore from checkpointed offset to newest
+          // with no trim. be conservative. allow 10% safety margin to avoid deletions when the downtime is close
+          // to min.compaction.lag.ms
+          long minCompactionLagMs = new StorageConfig(config).getChangelogMinCompactionLagMs(storeName);
+          if (timeSinceLastCheckpointInMs > .9 * minCompactionLagMs) {
+            LOG.warn("Checkpointed offset for store: {} in task: {} is: {}. It is in range of oldest: {} and " +
+                "newest: {} changelog offset. However, time since last checkpoint is: {}, which is greater than " +
+                "0.9 * min.compaction.lag.ms: {} for the changelog topic. Since there is a chance that" +
+                "the changelog topic has been compacted, restoring store to the end of the current changelog contents." +
+                "There is no transactional local state guarantee.", storeName, taskName, checkpointedOffset,
+                oldestOffset, newestOffset, timeSinceLastCheckpointInMs, minCompactionLagMs);
+            targetOffset = newestOffset;
           } else {
-            LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is false, " +
-                "any local state and changelog topic contents will be deleted.", storeName, taskName);
-            // mark for restore from (oldest, null) to trim entire changelog.
-            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, null));
+            targetOffset = checkpointedOffset;
           }
-        } else if (// check if the checkpointed offset is out of range of current oldest and newest offsets
-            admin.offsetComparator(oldestOffset, checkpointedOffset) > 0 ||
-            admin.offsetComparator(checkpointedOffset, newestOffset) > 0) {
-          // checkpointed offset is out of range. this could mean that this is a TTL topic and the checkpointed
-          // offset was TTLd, or that the changelog topic was manually deleted and then recreated.
-          // we cannot guarantee transactional state for TTL stores, so delete everything and do a full restore
-          // for local store. if the topic was deleted and recreated, this will have the side effect of
-          // clearing the store as well.
-          LOG.warn("Checkpointed offset: {} for store: {} in task: {} is out of range of oldest: {} or newest: {} offset." +
-                  "Deleting existing store and fully restoring from changelog topic from oldest to newest offset. If the topic " +
-                  "has time-based retention, there is no transactional local state guarantees. If the topic was changed," +
-                  "local state will be cleaned up and fully restored to match the new topic contents.",
-              checkpointedOffset, storeName, taskName, oldestOffset, newestOffset);
-          checkpointDirsOptional.ifPresent(checkpointDirs ->
-              checkpointDirs.forEach(checkpointDir -> storeDirsToDelete.put(storeName, checkpointDir)));
-          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
-        } else { // happy path. checkpointed offset is in range of current oldest and newest offsets
-          if (!checkpointDirsOptional.isPresent()) { // non-persistent logged store
-            LOG.info("Did not find any checkpoint directories for logged (maybe non-persistent) store: {}. Local state " +
-                "will be fully restored from current changelog contents.", storeName);
-            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
-          } else { // persistent logged store
-            String targetOffset;
-
-            // check checkpoint time against min.compaction.lag.ms. if older, restore from checkpointed offset to newest
-            // with no trim. be conservative. allow 10% safety margin to avoid deletions when the downtime is close
-            // to min.compaction.lag.ms
-            long minCompactionLagMs = new StorageConfig(config).getChangelogMinCompactionLagMs(storeName);
-            if (timeSinceLastCheckpointInMs > .9 * minCompactionLagMs) {
-              LOG.warn("Checkpointed offset for store: {} in task: {} is: {}. It is in range of oldest: {} and " +
-                  "newest: {} changelog offset. However, time since last checkpoint is: {}, which is greater than " +
-                  "0.9 * min.compaction.lag.ms: {} for the changelog topic. Since there is a chance that" +
-                  "the changelog topic has been compacted, restoring store to the end of the current changelog contents." +
-                  "There is no transactional local state guarantee.", storeName, taskName, checkpointedOffset,
-                  oldestOffset, newestOffset, timeSinceLastCheckpointInMs, minCompactionLagMs);
-              targetOffset = newestOffset;
-            } else {
-              targetOffset = checkpointedOffset;
-            }
 
-            // if there exists a valid store checkpoint directory with oldest offset <= local offset <= target offset,
-            // retain it and restore the delta. delete all other checkpoint directories for the store. if more than one such
-            // checkpoint directory exists, retain the one with the highest local offset and delete the rest.
-            boolean hasValidCheckpointDir = false;
-            for (File checkpointDir: checkpointDirsOptional.get()) {
-              if (storageManagerUtil.isLoggedStoreValid(
-                  storeName, checkpointDir, config, storeChangelogs, taskModel, clock, storeEngines)) {
-                String localOffset = storageManagerUtil.readOffsetFile(
-                    checkpointDir, Collections.singleton(changelogSSP), false).get(changelogSSP);
-                LOG.info("Read local offset: {} for store: {} checkpoint dir: {} in task: {}", localOffset, storeName,
-                    checkpointDir, taskName);
-
-                if (admin.offsetComparator(localOffset, oldestOffset) >= 0 &&
-                    admin.offsetComparator(localOffset, targetOffset) <= 0 &&
-                    (storesToRestore.get(storeName) == null ||
-                        admin.offsetComparator(localOffset, storesToRestore.get(storeName).startingOffset) > 0)) {
-                  hasValidCheckpointDir = true;
-                  LOG.info("Temporarily marking checkpoint dir: {} for store: {} in task: {} for retention. " +
-                      "May be overridden later.", checkpointDir, storeName, taskName);
-                  storeDirToRetain.put(storeName, checkpointDir);
-                  // mark for restore even if local == checkpointed, so that the changelog gets trimmed.
-                  LOG.info("Temporarily marking store: {} in task: {} for restore from beginning offset: {} to " +
-                      "ending offset: {}. May be overridden later", storeName, taskName, localOffset, targetOffset);
-                  storesToRestore.put(storeName, new RestoreOffsets(localOffset, targetOffset));
-                }
+          // if there exists a valid store checkpoint directory with oldest offset <= local offset <= target offset,
+          // retain it and restore the delta. delete all other checkpoint directories for the store. if more than one such
+          // checkpoint directory exists, retain the one with the highest local offset and delete the rest.
+          boolean hasValidCheckpointDir = false;
+          for (File checkpointDir: checkpointDirsOptional.get()) {
+            if (storageManagerUtil.isLoggedStoreValid(
+                storeName, checkpointDir, config, storeChangelogs, taskModel, clock, storeEngines)) {
+              String localOffset = storageManagerUtil.readOffsetFile(
+                  checkpointDir, Collections.singleton(changelogSSP), false).get(changelogSSP);
+              LOG.info("Read local offset: {} for store: {} checkpoint dir: {} in task: {}", localOffset, storeName,
+                  checkpointDir, taskName);
+
+              if (admin.offsetComparator(localOffset, oldestOffset) >= 0 &&
+                  admin.offsetComparator(localOffset, targetOffset) <= 0 &&
+                  (storesToRestore.get(storeName) == null ||
+                      admin.offsetComparator(localOffset, storesToRestore.get(storeName).startingOffset) > 0)) {
+                hasValidCheckpointDir = true;
+                LOG.info("Temporarily marking checkpoint dir: {} for store: {} in task: {} for retention. " +
+                    "May be overridden later.", checkpointDir, storeName, taskName);
+                storeDirToRetain.put(storeName, checkpointDir);
+                // mark for restore even if local == checkpointed, so that the changelog gets trimmed.
+                LOG.info("Temporarily marking store: {} in task: {} for restore from beginning offset: {} to " +
+                    "ending offset: {}. May be overridden later", storeName, taskName, localOffset, targetOffset);
+                storesToRestore.put(storeName, new RestoreOffsets(localOffset, targetOffset));
               }
             }
+          }
 
-            // delete all non-retained checkpoint directories
-            for (File checkpointDir: checkpointDirsOptional.get()) {
-              if (storeDirToRetain.get(storeName) == null ||
-                  !storeDirToRetain.get(storeName).equals(checkpointDir)) {
-                LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since it is not " +
-                    "marked for retention.", checkpointDir, storeName, taskName);
-                storeDirsToDelete.put(storeName, checkpointDir);
-              }
+          // delete all non-retained checkpoint directories
+          for (File checkpointDir: checkpointDirsOptional.get()) {
+            if (storeDirToRetain.get(storeName) == null ||
+                !storeDirToRetain.get(storeName).equals(checkpointDir)) {
+              LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since it is not " +
+                  "marked for retention.", checkpointDir, storeName, taskName);
+              storeDirsToDelete.put(storeName, checkpointDir);
             }
+          }
 
-            // if the store had not valid checkpoint dirs to retain, restore from changelog
-            if (!hasValidCheckpointDir) {
-              storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, targetOffset));
-            }
+          // if the store has no valid checkpoint dirs to retain, restore from the changelog
+          if (!hasValidCheckpointDir) {
+            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, targetOffset));
           }
         }
-      });
+      }
+    });
 
     LOG.info("Store directories to be retained in Task: {} are: {}", taskName, storeDirToRetain);
     LOG.info("Store directories to be deleted in Task: {} are: {}", taskName, storeDirsToDelete);
@@ -447,48 +447,48 @@ public class TransactionalStateTaskRestoreManager implements TaskRestoreManager
 
     // delete all persistent store directories marked for deletion
     storeDirsToDelete.entries().forEach(entry -> {
-        String storeName = entry.getKey();
-        File storeDirToDelete = entry.getValue();
-        LOG.info("Deleting persistent store directory: {} for store: {} in task: {}",
-            storeDirToDelete, storeName, taskName);
-        fileUtil.rm(storeDirToDelete);
-      });
+      String storeName = entry.getKey();
+      File storeDirToDelete = entry.getValue();
+      LOG.info("Deleting persistent store directory: {} for store: {} in task: {}",
+          storeDirToDelete, storeName, taskName);
+      fileUtil.rm(storeDirToDelete);
+    });
 
     // rename all retained persistent logged store checkpoint directories to current directory
     storeDirsToRetain.forEach((storeName, storeDirToRetain) -> {
-        File currentDir = storageManagerUtil.getTaskStoreDir(
-            loggedStoreBaseDirectory, storeName, taskName, taskMode);
-        LOG.info("Moving logged store checkpoint directory: {} for store: {} in task: {} to current directory: {}",
-            storeDirsToRetain.toString(), storeName, taskName, currentDir);
-        storageManagerUtil.restoreCheckpointFiles(storeDirToRetain, currentDir);
-        // do not remove the checkpoint directory yet. in case commit fails and container restarts,
-        // we can retry the move. if we delete the checkpoint, the current dir will be deleted as well on
-        // restart, and we will have to do a full restore.
-      });
+      File currentDir = storageManagerUtil.getTaskStoreDir(
+          loggedStoreBaseDirectory, storeName, taskName, taskMode);
+      LOG.info("Moving logged store checkpoint directory: {} for store: {} in task: {} to current directory: {}",
+          storeDirToRetain, storeName, taskName, currentDir);
+      storageManagerUtil.restoreCheckpointFiles(storeDirToRetain, currentDir);
+      // do not remove the checkpoint directory yet. in case commit fails and container restarts,
+      // we can retry the move. if we delete the checkpoint, the current dir will be deleted as well on
+      // restart, and we will have to do a full restore.
+    });
 
     // create any missing (not retained) current directories for persistent stores
     storeEngines.forEach((storeName, storageEngine) -> {
-        if (storageEngine.getStoreProperties().isPersistedToDisk()) {
-          File currentDir;
-          if (storageEngine.getStoreProperties().isLoggedStore()) {
-            currentDir = storageManagerUtil.getTaskStoreDir(
-                loggedStoreBaseDirectory, storeName, taskName, taskMode);
-          } else {
-            currentDir = storageManagerUtil.getTaskStoreDir(
-                nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
-          }
+      if (storageEngine.getStoreProperties().isPersistedToDisk()) {
+        File currentDir;
+        if (storageEngine.getStoreProperties().isLoggedStore()) {
+          currentDir = storageManagerUtil.getTaskStoreDir(
+              loggedStoreBaseDirectory, storeName, taskName, taskMode);
+        } else {
+          currentDir = storageManagerUtil.getTaskStoreDir(
+              nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
+        }
 
-          try {
-            if (!fileUtil.exists(currentDir.toPath())) {
-              LOG.info("Creating missing persistent store current directory: {} for store: {} in task: {}",
-                  currentDir, storeName, taskName);
-              fileUtil.createDirectories(currentDir.toPath());
-            }
-          } catch (Exception e) {
-            throw new SamzaException(String.format("Error setting up current directory for store: %s", storeName), e);
+        try {
+          if (!fileUtil.exists(currentDir.toPath())) {
+            LOG.info("Creating missing persistent store current directory: {} for store: {} in task: {}",
+                currentDir, storeName, taskName);
+            fileUtil.createDirectories(currentDir.toPath());
           }
+        } catch (Exception e) {
+          throw new SamzaException(String.format("Error setting up current directory for store: %s", storeName), e);
         }
-      });
+      }
+    });
   }
 
   /**
@@ -509,39 +509,39 @@ public class TransactionalStateTaskRestoreManager implements TaskRestoreManager
     // hence we register upcoming offset as the dummy offset by default and override it later if necessary.
     // using upcoming offset ensures that no messages are replayed by default.
     storeChangelogs.forEach((storeName, changelog) -> {
-        SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
-        SystemConsumer systemConsumer = storeConsumers.get(storeName);
-        SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
-        String upcomingOffset = currentOffsets.getUpcomingOffset();
-        LOG.info("Temporarily registering upcoming offset: {} as the starting offest for changelog ssp: {}. " +
-            "This might be overridden later for stores that need restoring.", upcomingOffset, changelogSSP);
-        systemConsumer.register(changelogSSP, upcomingOffset);
-      });
+      SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
+      SystemConsumer systemConsumer = storeConsumers.get(storeName);
+      SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
+      String upcomingOffset = currentOffsets.getUpcomingOffset();
+      LOG.info("Temporarily registering upcoming offset: {} as the starting offest for changelog ssp: {}. " +
+          "This might be overridden later for stores that need restoring.", upcomingOffset, changelogSSP);
+      systemConsumer.register(changelogSSP, upcomingOffset);
+    });
 
     // now register the actual starting offset if necessary. system consumer will ensure that the lower of the
     // two registered offsets is used as the starting offset.
     storesToRestore.forEach((storeName, restoreOffsets) -> {
-        SystemStream changelog = storeChangelogs.get(storeName);
-        SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
-        SystemAdmin systemAdmin = systemAdmins.getSystemAdmin(changelog.getSystem());
-        validateRestoreOffsets(restoreOffsets, systemAdmin);
-
-        SystemConsumer systemConsumer = storeConsumers.get(storeName);
-        SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
-        String oldestOffset = currentOffsets.getOldestOffset();
-
-        // if the starting offset equals oldest offset (e.g. for full restore), start from the oldest offset (inclusive).
-        // else, start from the next (upcoming) offset.
-        String startingOffset;
-        if (systemAdmin.offsetComparator(restoreOffsets.startingOffset, oldestOffset) == 0) {
-          startingOffset = oldestOffset;
-        } else {
-          Map<SystemStreamPartition, String> offsetMap = ImmutableMap.of(changelogSSP, restoreOffsets.startingOffset);
-          startingOffset = systemAdmin.getOffsetsAfter(offsetMap).get(changelogSSP);
-        }
-        LOG.info("Registering starting offset: {} for changelog ssp: {}", startingOffset, changelogSSP);
-        systemConsumer.register(changelogSSP, startingOffset);
-      });
+      SystemStream changelog = storeChangelogs.get(storeName);
+      SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
+      SystemAdmin systemAdmin = systemAdmins.getSystemAdmin(changelog.getSystem());
+      validateRestoreOffsets(restoreOffsets, systemAdmin);
+
+      SystemConsumer systemConsumer = storeConsumers.get(storeName);
+      SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
+      String oldestOffset = currentOffsets.getOldestOffset();
+
+      // if the starting offset equals oldest offset (e.g. for full restore), start from the oldest offset (inclusive).
+      // else, start from the next (upcoming) offset.
+      String startingOffset;
+      if (systemAdmin.offsetComparator(restoreOffsets.startingOffset, oldestOffset) == 0) {
+        startingOffset = oldestOffset;
+      } else {
+        Map<SystemStreamPartition, String> offsetMap = ImmutableMap.of(changelogSSP, restoreOffsets.startingOffset);
+        startingOffset = systemAdmin.getOffsetsAfter(offsetMap).get(changelogSSP);
+      }
+      LOG.info("Registering starting offset: {} for changelog ssp: {}", startingOffset, changelogSSP);
+      systemConsumer.register(changelogSSP, startingOffset);
+    });
   }
 
   private static void validateRestoreOffsets(RestoreOffsets restoreOffsets, SystemAdmin systemAdmin) {
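The registration hunk above relies on one property of the consumer: when the same
SSP is registered twice, the lower of the two offsets wins. A minimal sketch of the
two-phase registration under that assumption (hypothetical numeric String offsets;
the real code goes through SystemConsumer.register and SystemAdmin.getOffsetsAfter):

    import java.util.HashMap;
    import java.util.Map;

    final class RegistrationSketch {
      private final Map<String, String> registered = new HashMap<>();

      void register(String ssp, String offset) {
        // keep the lower of the two registered offsets, as the comment above states
        registered.merge(ssp, offset,
            (a, b) -> Long.parseLong(a) <= Long.parseLong(b) ? a : b);
      }

      public static void main(String[] args) {
        RegistrationSketch consumer = new RegistrationSketch();
        consumer.register("changelog-p0", "42");  // phase 1: upcoming offset (no replay)
        consumer.register("changelog-p0", "17");  // phase 2: actual restore start
        System.out.println(consumer.registered);  // {changelog-p0=17}
      }
    }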
diff --git a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java
index 9055819..13ebf6e 100644
--- a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java
+++ b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java
@@ -111,9 +111,7 @@ class InMemoryManager {
   Map<SystemStreamPartition, List<IncomingMessageEnvelope>> poll(Map<SystemStreamPartition, String> sspsToOffsets) {
     return sspsToOffsets.entrySet()
         .stream()
-        .collect(Collectors.toMap(
-            Map.Entry::getKey,
-            entry -> poll(entry.getKey(), entry.getValue())));
+        .collect(Collectors.toMap(Map.Entry::getKey, entry -> poll(entry.getKey(), entry.getValue())));
   }
 
   /**
@@ -155,9 +153,8 @@ class InMemoryManager {
 
     return result.entrySet()
         .stream()
-        .collect(Collectors.toMap(
-            Map.Entry::getKey,
-            entry -> constructSystemStreamMetadata(entry.getKey(), entry.getValue())));
+        .collect(Collectors.toMap(Map.Entry::getKey,
+          entry -> constructSystemStreamMetadata(entry.getKey(), entry.getValue())));
   }
 
   /**
@@ -180,39 +177,39 @@ class InMemoryManager {
             .entrySet()
             .stream()
             .collect(Collectors.toMap(entry -> entry.getKey().getPartition(), entry -> {
-                List<IncomingMessageEnvelope> messages = entry.getValue();
-                Integer oldestOffset;
-                Integer newestOffset;
-                int upcomingOffset;
-
-                if (messages.isEmpty()) {
+              List<IncomingMessageEnvelope> messages = entry.getValue();
+              Integer oldestOffset;
+              Integer newestOffset;
+              int upcomingOffset;
+
+              if (messages.isEmpty()) {
+                oldestOffset = null;
+                newestOffset = null;
+                upcomingOffset = 0;
+              } else if (messages.get(messages.size() - 1).isEndOfStream()) {
+                if (messages.size() > 1) {
+                  // don't count end of stream in offset indices
+                  oldestOffset = 0;
+                  newestOffset = messages.size() - 2;
+                  upcomingOffset = messages.size() - 1;
+                } else {
+                  // end of stream is the only message, treat the same as empty
                   oldestOffset = null;
                   newestOffset = null;
                   upcomingOffset = 0;
-                } else if (messages.get(messages.size() - 1).isEndOfStream()) {
-                  if (messages.size() > 1) {
-                    // don't count end of stream in offset indices
-                    oldestOffset = 0;
-                    newestOffset = messages.size() - 2;
-                    upcomingOffset = messages.size() - 1;
-                  } else {
-                    // end of stream is the only message, treat the same as empty
-                    oldestOffset = null;
-                    newestOffset = null;
-                    upcomingOffset = 0;
-                  }
-                } else {
-                  // offsets correspond strictly to numeric indices
-                  oldestOffset = 0;
-                  newestOffset = messages.size() - 1;
-                  upcomingOffset = messages.size();
                 }
-
-                return new SystemStreamMetadata.SystemStreamPartitionMetadata(
-                    oldestOffset == null ? null : oldestOffset.toString(),
-                    newestOffset == null ? null : newestOffset.toString(),
-                    Integer.toString(upcomingOffset));
-              }));
+              } else {
+                // offsets correspond strictly to numeric indices
+                oldestOffset = 0;
+                newestOffset = messages.size() - 1;
+                upcomingOffset = messages.size();
+              }
+
+              return new SystemStreamMetadata.SystemStreamPartitionMetadata(
+                  oldestOffset == null ? null : oldestOffset.toString(),
+                  newestOffset == null ? null : newestOffset.toString(),
+                  Integer.toString(upcomingOffset));
+            }));
 
     return new SystemStreamMetadata(streamName, partitionMetadata);
   }
diff --git a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java
index cb5478c..38ce2af 100644
--- a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java
+++ b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java
@@ -61,9 +61,9 @@ public class InMemorySystemAdmin implements SystemAdmin {
     return offsets.entrySet()
         .stream()
         .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
-            String offset = entry.getValue();
-            return String.valueOf(Integer.valueOf(offset) + 1);
-          }));
+          String offset = entry.getValue();
+          return String.valueOf(Integer.valueOf(offset) + 1);
+        }));
   }
 
   /**
diff --git a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java
index 2c02b79..ae3d3c9 100644
--- a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java
+++ b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java
@@ -148,9 +148,9 @@ public class InMemorySystemConsumer implements SystemConsumer {
 
     for (Map.Entry<SystemStreamPartition, List<IncomingMessageEnvelope>> sspToMessage : result.entrySet()) {
       sspToOffset.computeIfPresent(sspToMessage.getKey(), (ssp, offset) -> {
-          int newOffset = Integer.parseInt(offset) + sspToMessage.getValue().size();
-          return String.valueOf(newOffset);
-        });
+        int newOffset = Integer.parseInt(offset) + sspToMessage.getValue().size();
+        return String.valueOf(newOffset);
+      });
       // absent should never be the case
     }
 
diff --git a/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java b/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java
index 9e2f279..8b647ed 100644
--- a/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java
+++ b/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java
@@ -76,10 +76,10 @@ public class TableConfigGenerator {
     tableDescriptors.stream()
         .filter(d -> d instanceof LocalTableDescriptor)
         .forEach(d -> {
-            LocalTableDescriptor ld = (LocalTableDescriptor) d;
-            tableKeySerdes.put(ld.getTableId(), ld.getSerde().getKeySerde());
-            tableValueSerdes.put(ld.getTableId(), ld.getSerde().getValueSerde());
-          });
+          LocalTableDescriptor ld = (LocalTableDescriptor) d;
+          tableKeySerdes.put(ld.getTableId(), ld.getSerde().getKeySerde());
+          tableValueSerdes.put(ld.getTableId(), ld.getSerde().getValueSerde());
+        });
     serdes.addAll(tableKeySerdes.values());
     serdes.addAll(tableValueSerdes.values());
 
@@ -88,21 +88,21 @@ public class TableConfigGenerator {
     Base64.Encoder base64Encoder = Base64.getEncoder();
     Map<Serde, String> serdeUUIDs = new HashMap<>();
     serdes.forEach(serde -> {
-        String serdeName = serdeUUIDs.computeIfAbsent(serde,
-            s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
-        serdeConfigs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
-            base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
-      });
+      String serdeName = serdeUUIDs.computeIfAbsent(serde,
+        s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
+      serdeConfigs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
+          base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
+    });
 
     // Set key and msg serdes for tables to the serde names generated above
     tableKeySerdes.forEach((tableId, serde) -> {
-        String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
-        serdeConfigs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
+      serdeConfigs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
     tableValueSerdes.forEach((tableId, serde) -> {
-        String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
-        serdeConfigs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
+      serdeConfigs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
+    });
     return serdeConfigs;
   }
 }
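The serde wiring above follows a simple scheme: each distinct serde instance gets a
generated name, its serialized bytes are written to config once under that name, and
tables reference the name. A sketch under simplifying assumptions (plain Java
serialization instead of SerializableSerde; the config key shapes are illustrative):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;
    import java.util.Base64;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.UUID;

    final class SerdeConfigSketch {
      static Map<String, String> generate(Map<String, Serializable> tableKeySerdes) {
        Map<String, String> configs = new HashMap<>();
        Map<Serializable, String> names = new HashMap<>();
        tableKeySerdes.forEach((tableId, serde) -> {
          // one generated name per distinct serde instance
          String name = names.computeIfAbsent(serde,
              s -> s.getClass().getSimpleName() + "-" + UUID.randomUUID());
          // serialized form is registered once under that name
          configs.putIfAbsent("serializers.registry." + name + ".serialized.instance",
              Base64.getEncoder().encodeToString(toBytes(serde)));
          // the table then points at the name, not the bytes
          configs.put("stores." + tableId + ".key.serde", name);
        });
        return configs;
      }

      private static byte[] toBytes(Serializable s) {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
          oos.writeObject(s);
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
        return bos.toByteArray();
      }
    }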
diff --git a/samza-core/src/main/java/org/apache/samza/table/TableManager.java b/samza-core/src/main/java/org/apache/samza/table/TableManager.java
index d3611f8..86899de 100644
--- a/samza-core/src/main/java/org/apache/samza/table/TableManager.java
+++ b/samza-core/src/main/java/org/apache/samza/table/TableManager.java
@@ -70,9 +70,9 @@ public class TableManager {
    */
   public TableManager(Config config) {
     new JavaTableConfig(config).getTableIds().forEach(tableId -> {
-        addTable(tableId, config);
-        logger.debug("Added table " + tableId);
-      });
+      addTable(tableId, config);
+      logger.debug("Added table " + tableId);
+    });
     logger.info(String.format("Added %d tables", tableContexts.size()));
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java b/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java
index e061885..025a565 100644
--- a/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java
+++ b/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java
@@ -148,13 +148,13 @@ public class BatchProcessor<K, V> {
     final long maxDelay = batch.getMaxBatchDelay().toMillis();
     if (maxDelay != Integer.MAX_VALUE) {
       scheduledFuture = scheduledExecutorService.schedule(() -> {
-          lock.lock();
-          try {
-            processBatch(false);
-          } finally {
-            lock.unlock();
-          }
-        }, maxDelay, TimeUnit.MILLISECONDS);
+        lock.lock();
+        try {
+          processBatch(false);
+        } finally {
+          lock.unlock();
+        }
+      }, maxDelay, TimeUnit.MILLISECONDS);
     }
   }
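The scheduled flush above is a small but important pattern: the timer callback takes
the same lock as writers appending to the batch, so a time-triggered flush can never
interleave with an in-flight update. A runnable sketch (processBatch() is a stand-in):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantLock;

    final class DelayedFlushSketch {
      private final ReentrantLock lock = new ReentrantLock();
      private final ScheduledExecutorService scheduler =
          Executors.newSingleThreadScheduledExecutor();

      void scheduleFlush(long maxDelayMs) {
        scheduler.schedule(() -> {
          lock.lock();
          try {
            processBatch();
          } finally {
            lock.unlock();  // always release, even if the flush throws
          }
        }, maxDelayMs, TimeUnit.MILLISECONDS);
      }

      private void processBatch() { /* flush buffered operations */ }
    }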
 
diff --git a/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java b/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java
index 12ec6f6..67d8dd8 100644
--- a/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java
+++ b/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java
@@ -64,15 +64,15 @@ public class TableBatchHandler<K, V> implements BatchHandler<K, V> {
         table.getAllAsync(gets) : table.getAllAsync(gets, args);
 
     getsFuture.whenComplete((map, throwable) -> {
-        operations.forEach(operation -> {
-            GetOperation<K, V> getOperation = (GetOperation<K, V>) operation;
-            if (throwable != null) {
-              getOperation.completeExceptionally(throwable);
-            } else {
-              getOperation.complete(map.get(operation.getKey()));
-            }
-          });
+      operations.forEach(operation -> {
+        GetOperation<K, V> getOperation = (GetOperation<K, V>) operation;
+        if (throwable != null) {
+          getOperation.completeExceptionally(throwable);
+        } else {
+          getOperation.complete(map.get(operation.getKey()));
+        }
       });
+    });
     return getsFuture;
   }
 
@@ -151,11 +151,11 @@ public class TableBatchHandler<K, V> implements BatchHandler<K, V> {
         handleBatchDelete(getDeleteOperations(batch)),
         handleBatchGet(getQueryOperations(batch)))
         .whenComplete((val, throwable) -> {
-            if (throwable != null) {
-              batch.completeExceptionally(throwable);
-            } else {
-              batch.complete();
-            }
-          });
+          if (throwable != null) {
+            batch.completeExceptionally(throwable);
+          } else {
+            batch.complete();
+          }
+        });
   }
 }
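The completion fan-out above turns one bulk read into many per-key results: on
failure every queued operation fails, on success each is completed from the bulk
map. A sketch with a hypothetical GetOp stand-in for GetOperation<K, V>:

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.CompletableFuture;

    final class BatchGetSketch {
      static final class GetOp<K, V> {
        final K key;
        final CompletableFuture<V> future = new CompletableFuture<>();
        GetOp(K key) { this.key = key; }
      }

      static <K, V> void completeAll(CompletableFuture<Map<K, V>> bulk, List<GetOp<K, V>> ops) {
        bulk.whenComplete((map, throwable) -> ops.forEach(op -> {
          if (throwable != null) {
            op.future.completeExceptionally(throwable);  // fail every queued get
          } else {
            op.future.complete(map.get(op.key));         // null if the key was missing
          }
        }));
      }
    }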
diff --git a/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java b/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java
index dee0767..cb86cac 100644
--- a/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java
@@ -99,10 +99,10 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
     List<K> missKeys = new ArrayList<>();
     records.putAll(cache.getAll(keys, args));
     keys.forEach(k -> {
-        if (!records.containsKey(k)) {
-          missKeys.add(k);
-        }
-      });
+      if (!records.containsKey(k)) {
+        missKeys.add(k);
+      }
+    });
     return missKeys;
   }
 
@@ -128,16 +128,16 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
     missCount.incrementAndGet();
 
     return table.getAsync(key, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to get the record for " + key, e);
-        } else {
-          if (result != null) {
-            cache.put(key, result, args);
-          }
-          updateTimer(metrics.getNs, clock.nanoTime() - startNs);
-          return result;
+      if (e != null) {
+        throw new SamzaException("Failed to get the record for " + key, e);
+      } else {
+        if (result != null) {
+          cache.put(key, result, args);
         }
-      });
+        updateTimer(metrics.getNs, clock.nanoTime() - startNs);
+        return result;
+      }
+    });
   }
 
   @Override
@@ -162,19 +162,19 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
 
     long startNs = clock.nanoTime();
     return table.getAllAsync(missingKeys, args).handle((records, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to get records for " + keys, e);
-        } else {
-          if (records != null) {
-            cache.putAll(records.entrySet().stream()
-                .map(r -> new Entry<>(r.getKey(), r.getValue()))
-                .collect(Collectors.toList()), args);
-            getAllResult.putAll(records);
-          }
-          updateTimer(metrics.getAllNs, clock.nanoTime() - startNs);
-          return getAllResult;
+      if (e != null) {
+        throw new SamzaException("Failed to get records for " + keys, e);
+      } else {
+        if (records != null) {
+          cache.putAll(records.entrySet().stream()
+              .map(r -> new Entry<>(r.getKey(), r.getValue()))
+              .collect(Collectors.toList()), args);
+          getAllResult.putAll(records);
         }
-      });
+        updateTimer(metrics.getAllNs, clock.nanoTime() - startNs);
+        return getAllResult;
+      }
+    });
   }
 
   @Override
@@ -193,18 +193,18 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
 
     long startNs = clock.nanoTime();
     return table.putAsync(key, value, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException(String.format("Failed to put a record, key=%s, value=%s", key, value), e);
-        } else if (!isWriteAround) {
-          if (value == null) {
-            cache.delete(key, args);
-          } else {
-            cache.put(key, value, args);
-          }
+      if (e != null) {
+        throw new SamzaException(String.format("Failed to put a record, key=%s, value=%s", key, value), e);
+      } else if (!isWriteAround) {
+        if (value == null) {
+          cache.delete(key, args);
+        } else {
+          cache.put(key, value, args);
         }
-        updateTimer(metrics.putNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      }
+      updateTimer(metrics.putNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -222,15 +222,15 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
     long startNs = clock.nanoTime();
     Preconditions.checkNotNull(table, "Cannot write to a read-only table: " + table);
     return table.putAllAsync(records, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to put records " + records, e);
-        } else if (!isWriteAround) {
-          cache.putAll(records, args);
-        }
-
-        updateTimer(metrics.putAllNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to put records " + records, e);
+      } else if (!isWriteAround) {
+        cache.putAll(records, args);
+      }
+
+      updateTimer(metrics.putAllNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -248,14 +248,14 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
     long startNs = clock.nanoTime();
     Preconditions.checkNotNull(table, "Cannot delete from a read-only table: " + table);
     return table.deleteAsync(key, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to delete the record for " + key, e);
-        } else if (!isWriteAround) {
-          cache.delete(key, args);
-        }
-        updateTimer(metrics.deleteNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to delete the record for " + key, e);
+      } else if (!isWriteAround) {
+        cache.delete(key, args);
+      }
+      updateTimer(metrics.deleteNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -273,14 +273,14 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
     long startNs = clock.nanoTime();
     Preconditions.checkNotNull(table, "Cannot delete from a read-only table: " + table);
     return table.deleteAllAsync(keys, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to delete the record for " + keys, e);
-        } else if (!isWriteAround) {
-          cache.deleteAll(keys, args);
-        }
-        updateTimer(metrics.deleteAllNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to delete the record for " + keys, e);
+      } else if (!isWriteAround) {
+        cache.deleteAll(keys, args);
+      }
+      updateTimer(metrics.deleteAllNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -288,12 +288,12 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
     incCounter(metrics.numReads);
     long startNs = clock.nanoTime();
     return table.readAsync(opId, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to read, opId=" + opId, e);
-        }
-        updateTimer(metrics.readNs, clock.nanoTime() - startNs);
-        return (T) result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to read, opId=" + opId, e);
+      }
+      updateTimer(metrics.readNs, clock.nanoTime() - startNs);
+      return (T) result;
+    });
   }
 
   @Override
@@ -301,12 +301,12 @@ public class CachingTable<K, V> extends BaseReadWriteTable<K, V>
     incCounter(metrics.numWrites);
     long startNs = clock.nanoTime();
     return table.writeAsync(opId, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to write, opId=" + opId, e);
-        }
-        updateTimer(metrics.writeNs, clock.nanoTime() - startNs);
-        return (T) result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to write, opId=" + opId, e);
+      }
+      updateTimer(metrics.writeNs, clock.nanoTime() - startNs);
+      return (T) result;
+    });
   }
 
   @Override
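Most of the CachingTable hunks above share the same read-through/write-through
shape: do the remote call, then inside handle() rethrow failures and mirror the
result into the cache. A sketch of the read side (fetchFromTable() is a hypothetical
stand-in for table.getAsync(), and a plain ConcurrentMap stands in for the cache):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    final class ReadThroughSketch<K, V> {
      private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<>();

      CompletableFuture<V> getAsync(K key) {
        V cached = cache.get(key);
        if (cached != null) {
          return CompletableFuture.completedFuture(cached);  // hit: no remote call
        }
        return fetchFromTable(key).handle((result, e) -> {
          if (e != null) {
            throw new RuntimeException("Failed to get the record for " + key, e);
          }
          if (result != null) {
            cache.put(key, result);  // populate the cache only on a non-null result
          }
          return result;
        });
      }

      private CompletableFuture<V> fetchFromTable(K key) {
        return CompletableFuture.completedFuture(null);  // placeholder backing store
      }
    }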
diff --git a/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java b/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java
index 02083f3..46c3c4e 100644
--- a/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java
@@ -140,12 +140,12 @@ public class GuavaCacheTable<K, V> extends BaseReadWriteTable<K, V>
       List<K> delKeys = new ArrayList<>();
       List<Entry<K, V>> putRecords = new ArrayList<>();
       entries.forEach(r -> {
-          if (r.getValue() != null) {
-            putRecords.add(r);
-          } else {
-            delKeys.add(r.getKey());
-          }
-        });
+        if (r.getValue() != null) {
+          putRecords.add(r);
+        } else {
+          delKeys.add(r.getKey());
+        }
+      });
 
       cache.invalidateAll(delKeys);
       putRecords.forEach(e -> put(e.getKey(), e.getValue()));
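The GuavaCacheTable change above keeps the convention that a null value in a putAll
batch means "delete". A sketch of the partition-then-apply step against a plain Map:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    final class PutAllSplitSketch {
      static <K, V> void apply(List<Map.Entry<K, V>> entries, Map<K, V> cache) {
        List<K> delKeys = new ArrayList<>();
        List<Map.Entry<K, V>> putRecords = new ArrayList<>();
        entries.forEach(r -> {
          if (r.getValue() != null) {
            putRecords.add(r);       // real upsert
          } else {
            delKeys.add(r.getKey()); // null value means delete
          }
        });
        delKeys.forEach(cache::remove);
        putRecords.forEach(e -> cache.put(e.getKey(), e.getValue()));
      }
    }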
diff --git a/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java b/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java
index 75fed12..b652a15 100644
--- a/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java
@@ -66,57 +66,57 @@ public class AsyncRateLimitedTable<K, V> implements AsyncReadWriteTable<K, V> {
   @Override
   public CompletableFuture<V> getAsync(K key, Object ... args) {
     return doRead(
-        () -> readRateLimiter.throttle(key, args),
-        () -> table.getAsync(key, args));
+      () -> readRateLimiter.throttle(key, args),
+      () -> table.getAsync(key, args));
   }
 
   @Override
   public CompletableFuture<Map<K, V>> getAllAsync(List<K> keys, Object ... args) {
     return doRead(
-        () -> readRateLimiter.throttle(keys, args),
-        () -> table.getAllAsync(keys, args));
+      () -> readRateLimiter.throttle(keys, args),
+      () -> table.getAllAsync(keys, args));
   }
 
   @Override
   public <T> CompletableFuture<T> readAsync(int opId, Object ... args) {
     return doRead(
-        () -> readRateLimiter.throttle(opId, args),
-        () -> table.readAsync(opId, args));
+      () -> readRateLimiter.throttle(opId, args),
+      () -> table.readAsync(opId, args));
   }
 
   @Override
   public CompletableFuture<Void> putAsync(K key, V value, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(key, value, args),
-        () -> table.putAsync(key, value, args));
+      () -> writeRateLimiter.throttle(key, value, args),
+      () -> table.putAsync(key, value, args));
   }
 
   @Override
   public CompletableFuture<Void> putAllAsync(List<Entry<K, V>> entries, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttleRecords(entries),
-        () -> table.putAllAsync(entries, args));
+      () -> writeRateLimiter.throttleRecords(entries),
+      () -> table.putAllAsync(entries, args));
   }
 
   @Override
   public CompletableFuture<Void> deleteAsync(K key, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(key, args),
-        () -> table.deleteAsync(key, args));
+      () -> writeRateLimiter.throttle(key, args),
+      () -> table.deleteAsync(key, args));
   }
 
   @Override
   public CompletableFuture<Void> deleteAllAsync(List<K> keys, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(keys, args),
-        () -> table.deleteAllAsync(keys, args));
+      () -> writeRateLimiter.throttle(keys, args),
+      () -> table.deleteAllAsync(keys, args));
   }
 
   @Override
   public <T> CompletableFuture<T> writeAsync(int opId, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(opId, args),
-        () -> table.writeAsync(opId, args));
+      () -> writeRateLimiter.throttle(opId, args),
+      () -> table.writeAsync(opId, args));
   }
 
   @Override
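Every method in AsyncRateLimitedTable above is the same two-step composition:
throttle first, then run the table operation. A sketch of that shape, assuming a
rate-limiting executor is always configured (the real class also has an
unthrottled fast path):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Executor;
    import java.util.function.Supplier;

    final class RateLimitedSketch {
      static <T> CompletableFuture<T> doRead(Runnable throttle,
          Supplier<CompletableFuture<T>> op, Executor rateLimitingExecutor) {
        // run the potentially blocking throttle off the caller's thread,
        // then chain the actual table operation behind it
        return CompletableFuture.runAsync(throttle, rateLimitingExecutor)
            .thenCompose(ignored -> op.get());
      }
    }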
diff --git a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java
index 6d6c23a..eb54de2 100644
--- a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java
@@ -173,14 +173,14 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
     Preconditions.checkNotNull(key, "null key");
     return instrument(() -> asyncTable.getAsync(key, args), metrics.numGets, metrics.getNs)
         .handle((result, e) -> {
-            if (e != null) {
-              throw new SamzaException("Failed to get the records for " + key, e);
-            }
-            if (result == null) {
-              incCounter(metrics.numMissedLookups);
-            }
-            return result;
-          });
+          if (e != null) {
+            throw new SamzaException("Failed to get the records for " + key, e);
+          }
+          if (result == null) {
+            incCounter(metrics.numMissedLookups);
+          }
+          return result;
+        });
   }
 
   @Override
@@ -200,12 +200,12 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
     }
     return instrument(() -> asyncTable.getAllAsync(keys, args), metrics.numGetAlls, metrics.getAllNs)
         .handle((result, e) -> {
-            if (e != null) {
-              throw new SamzaException("Failed to get the records for " + keys, e);
-            }
-            result.values().stream().filter(Objects::isNull).forEach(v -> incCounter(metrics.numMissedLookups));
-            return result;
-          });
+          if (e != null) {
+            throw new SamzaException("Failed to get the records for " + keys, e);
+          }
+          result.values().stream().filter(Objects::isNull).forEach(v -> incCounter(metrics.numMissedLookups));
+          return result;
+        });
   }
 
   @Override
@@ -221,8 +221,8 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
   public <T> CompletableFuture<T> readAsync(int opId, Object ... args) {
     return (CompletableFuture<T>) instrument(() -> asyncTable.readAsync(opId, args), metrics.numReads, metrics.readNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to read, opId=%d", opId), e);
-          });
+          throw new SamzaException(String.format("Failed to read, opId=%d", opId), e);
+        });
   }
 
   @Override
@@ -244,8 +244,8 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
 
     return instrument(() -> asyncTable.putAsync(key, value, args), metrics.numPuts, metrics.putNs)
         .exceptionally(e -> {
-            throw new SamzaException("Failed to put a record with key=" + key, (Throwable) e);
-          });
+          throw new SamzaException("Failed to put a record with key=" + key, (Throwable) e);
+        });
   }
 
   @Override
@@ -281,9 +281,9 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
         deleteFuture,
         instrument(() -> asyncTable.putAllAsync(putRecords, args), metrics.numPutAlls, metrics.putAllNs))
         .exceptionally(e -> {
-            String strKeys = records.stream().map(r -> r.getKey().toString()).collect(Collectors.joining(","));
-            throw new SamzaException(String.format("Failed to put records with keys=" + strKeys), e);
-          });
+          String strKeys = records.stream().map(r -> r.getKey().toString()).collect(Collectors.joining(","));
+          throw new SamzaException(String.format("Failed to put records with keys=" + strKeys), e);
+        });
   }
 
   @Override
@@ -301,8 +301,8 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
     Preconditions.checkNotNull(key, "null key");
     return instrument(() -> asyncTable.deleteAsync(key, args), metrics.numDeletes, metrics.deleteNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to delete the record for " + key), (Throwable) e);
-          });
+          throw new SamzaException(String.format("Failed to delete the record for " + key), (Throwable) e);
+        });
   }
 
   @Override
@@ -324,8 +324,8 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
 
     return instrument(() -> asyncTable.deleteAllAsync(keys, args), metrics.numDeleteAlls, metrics.deleteAllNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to delete records for " + keys), e);
-          });
+          throw new SamzaException(String.format("Failed to delete records for " + keys), e);
+        });
   }
 
   @Override
@@ -341,8 +341,8 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
   public <T> CompletableFuture<T> writeAsync(int opId, Object... args) {
     return (CompletableFuture<T>) instrument(() -> asyncTable.writeAsync(opId, args), metrics.numWrites, metrics.writeNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to write, opId=%d", opId), e);
-          });
+          throw new SamzaException(String.format("Failed to write, opId=%d", opId), e);
+        });
   }
 
   @Override
@@ -390,14 +390,14 @@ public final class RemoteTable<K, V> extends BaseReadWriteTable<K, V>
     CompletableFuture<T> ioFuture = func.apply();
     if (callbackExecutor != null) {
       ioFuture.thenApplyAsync(r -> {
-          updateTimer(timer, clock.nanoTime() - startNs);
-          return r;
-        }, callbackExecutor);
+        updateTimer(timer, clock.nanoTime() - startNs);
+        return r;
+      }, callbackExecutor);
     } else {
       ioFuture.thenApply(r -> {
-          updateTimer(timer, clock.nanoTime() - startNs);
-          return r;
-        });
+        updateTimer(timer, clock.nanoTime() - startNs);
+        return r;
+      });
     }
     return ioFuture;
   }
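Note in the hunk above that instrument() returns the original ioFuture rather than
the thenApply() derivative: the timer update is a side effect on the metrics path,
and callers never wait on it. A sketch (recordNanos() is a hypothetical metrics hook):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Executor;
    import java.util.function.Supplier;

    final class InstrumentSketch {
      static <T> CompletableFuture<T> instrument(Supplier<CompletableFuture<T>> func,
          Executor callbackExecutor) {
        long startNs = System.nanoTime();
        CompletableFuture<T> ioFuture = func.get();
        if (callbackExecutor != null) {
          // record on the dedicated callback pool so user callbacks are not delayed
          ioFuture.thenApplyAsync(r -> { recordNanos(System.nanoTime() - startNs); return r; },
              callbackExecutor);
        } else {
          ioFuture.thenApply(r -> { recordNanos(System.nanoTime() - startNs); return r; });
        }
        return ioFuture;  // callers get the I/O future, not the metrics chain
      }

      private static void recordNanos(long ns) { /* update a timer metric */ }
    }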
diff --git a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java
index 8cd1c07..36078ef 100644
--- a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java
+++ b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java
@@ -100,33 +100,33 @@ public class RemoteTableProvider extends BaseTableProvider {
     if (callbackPoolSize > 0) {
       callbackExecutors.computeIfAbsent(tableId, (arg) ->
           Executors.newFixedThreadPool(callbackPoolSize, (runnable) -> {
-              Thread thread = new Thread(runnable);
-              thread.setName("table-" + tableId + "-async-callback-pool");
-              thread.setDaemon(true);
-              return thread;
-            }));
+            Thread thread = new Thread(runnable);
+            thread.setName("table-" + tableId + "-async-callback-pool");
+            thread.setDaemon(true);
+            return thread;
+          }));
     }
 
     boolean isRateLimited = readRateLimiter != null || writeRateLimiter != null;
     if (isRateLimited) {
       rateLimitingExecutors.computeIfAbsent(tableId, (arg) ->
           Executors.newSingleThreadExecutor(runnable -> {
-              Thread thread = new Thread(runnable);
-              thread.setName("table-" + tableId + "-async-executor");
-              thread.setDaemon(true);
-              return thread;
-            }));
+            Thread thread = new Thread(runnable);
+            thread.setName("table-" + tableId + "-async-executor");
+            thread.setDaemon(true);
+            return thread;
+          }));
     }
 
     BatchProvider batchProvider = deserializeObject(tableConfig, RemoteTableDescriptor.BATCH_PROVIDER);
     if (batchProvider != null) {
       batchExecutors.computeIfAbsent(tableId, (arg) ->
           Executors.newSingleThreadScheduledExecutor(runnable -> {
-              Thread thread = new Thread(runnable);
-              thread.setName("table-" + tableId + "-batch-scheduled-executor");
-              thread.setDaemon(true);
-              return thread;
-            }));
+            Thread thread = new Thread(runnable);
+            thread.setName("table-" + tableId + "-batch-scheduled-executor");
+            thread.setDaemon(true);
+            return thread;
+          }));
     }
 
 
@@ -162,11 +162,11 @@ public class RemoteTableProvider extends BaseTableProvider {
 
   private ScheduledExecutorService createRetryExecutor() {
     return Executors.newSingleThreadScheduledExecutor(runnable -> {
-        Thread thread = new Thread(runnable);
-        thread.setName("table-retry-executor");
-        thread.setDaemon(true);
-        return thread;
-      });
+      Thread thread = new Thread(runnable);
+      thread.setName("table-retry-executor");
+      thread.setDaemon(true);
+      return thread;
+    });
   }
 }
 
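
Note: every RemoteTableProvider hunk reformats the same thread-factory lambda.
The pattern itself, separated from the diff, is a single-threaded executor
whose worker is a named daemon thread. A hedged, standalone sketch (the class
name and thread-name prefix are illustrative only):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class NamedDaemonExecutorExample {
      public static ExecutorService create(String tableId) {
        return Executors.newSingleThreadExecutor(runnable -> {
          Thread thread = new Thread(runnable);
          thread.setName("table-" + tableId + "-async-executor");
          thread.setDaemon(true); // do not block JVM shutdown
          return thread;
        });
      }
    }
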
diff --git a/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java b/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java
index b2eccd8..650d03a 100644
--- a/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java
+++ b/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java
@@ -89,15 +89,15 @@ class FailsafeAdapter {
     return Failsafe.with(retryPolicy).with(retryExec)
         .onRetry(e -> metrics.retryCount.inc())
         .onRetriesExceeded(e -> {
-            metrics.retryTimer.update(System.currentTimeMillis() - startMs);
-            metrics.permFailureCount.inc();
-          })
+          metrics.retryTimer.update(System.currentTimeMillis() - startMs);
+          metrics.permFailureCount.inc();
+        })
         .onSuccess((e, ctx) -> {
-            if (ctx.getExecutions() > 1) {
-              metrics.retryTimer.update(System.currentTimeMillis() - startMs);
-            } else {
-              metrics.successCount.inc();
-            }
-          });
+          if (ctx.getExecutions() > 1) {
+            metrics.retryTimer.update(System.currentTimeMillis() - startMs);
+          } else {
+            metrics.successCount.inc();
+          }
+        });
   }
 }
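
Note: the FailsafeAdapter hunk touches only the retry-listener lambdas. A
sketch of that wiring using just the Failsafe 1.x calls visible in the hunk
(the RetryPolicy settings and the comments standing in for metric updates are
assumptions, not taken from this commit):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import net.jodah.failsafe.Failsafe;
    import net.jodah.failsafe.RetryPolicy;

    public class FailsafeListenerExample {
      public static void wire() {
        RetryPolicy retryPolicy = new RetryPolicy().withMaxRetries(3);
        ScheduledExecutorService retryExec = Executors.newSingleThreadScheduledExecutor();
        long startMs = System.currentTimeMillis();
        Failsafe.with(retryPolicy).with(retryExec)
            .onRetry(e -> { /* count the retry */ })
            .onRetriesExceeded(e -> {
              // record elapsed time and a permanent failure
            })
            .onSuccess((result, ctx) -> {
              if (ctx.getExecutions() > 1) {
                // success after at least one retry
              }
            });
      }
    }
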
diff --git a/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java b/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java
index a0ec7b8..5a474cd 100644
--- a/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java
+++ b/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java
@@ -140,12 +140,12 @@ public class StreamOperatorTask implements AsyncStreamTask, InitableTask, Window
           }
 
           processFuture.whenComplete((val, ex) -> {
-              if (ex != null) {
-                callback.failure(ex);
-              } else {
-                callback.complete();
-              }
-            });
+            if (ex != null) {
+              callback.failure(ex);
+            } else {
+              callback.complete();
+            }
+          });
         }
       } catch (Exception e) {
         LOG.error("Failed to process the incoming message due to ", e);
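
Note: the StreamOperatorTask hunk reformats the standard future-to-callback
bridge: complete the callback when the async stage finishes, fail it on
exception. A self-contained sketch (the Callback interface below is a
placeholder, not Samza's TaskCallback):

    import java.util.concurrent.CompletableFuture;

    public class CallbackBridgeExample {
      interface Callback {
        void complete();
        void failure(Throwable t);
      }

      static void bridge(CompletableFuture<Void> processFuture, Callback callback) {
        processFuture.whenComplete((val, ex) -> {
          if (ex != null) {
            callback.failure(ex);
          } else {
            callback.complete();
          }
        });
      }
    }
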
diff --git a/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java b/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java
index adb637e..60c86c1 100644
--- a/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java
+++ b/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java
@@ -75,15 +75,15 @@ public class EmbeddedTaggedRateLimiter implements RateLimiter {
     Stopwatch stopwatch = Stopwatch.createStarted();
     return tagToCreditsMap.entrySet().stream()
         .map(e -> {
-            String tag = e.getKey();
-            int requiredCredits = e.getValue();
-            long remainingTimeoutInNanos = Math.max(0L, timeoutInNanos - stopwatch.elapsed(NANOSECONDS));
-            com.google.common.util.concurrent.RateLimiter rateLimiter = tagToRateLimiterMap.get(tag);
-            int availableCredits = rateLimiter.tryAcquire(requiredCredits, remainingTimeoutInNanos, NANOSECONDS)
-                ? requiredCredits
-                : 0;
-            return new ImmutablePair<>(tag, availableCredits);
-          })
+          String tag = e.getKey();
+          int requiredCredits = e.getValue();
+          long remainingTimeoutInNanos = Math.max(0L, timeoutInNanos - stopwatch.elapsed(NANOSECONDS));
+          com.google.common.util.concurrent.RateLimiter rateLimiter = tagToRateLimiterMap.get(tag);
+          int availableCredits = rateLimiter.tryAcquire(requiredCredits, remainingTimeoutInNanos, NANOSECONDS)
+              ? requiredCredits
+              : 0;
+          return new ImmutablePair<>(tag, availableCredits);
+        })
         .collect(Collectors.toMap(ImmutablePair::getKey, ImmutablePair::getValue));
   }
 
@@ -110,22 +110,22 @@ public class EmbeddedTaggedRateLimiter implements RateLimiter {
   public void init(Context context) {
     this.tagToRateLimiterMap = Collections.unmodifiableMap(tagToTargetRateMap.entrySet().stream()
         .map(e -> {
-            String tag = e.getKey();
-            JobModel jobModel = ((TaskContextImpl) context.getTaskContext()).getJobModel();
-            int numTasks = jobModel.getContainers().values().stream()
-                .mapToInt(cm -> cm.getTasks().size())
-                .sum();
-            double effectiveRate = (double) e.getValue() / numTasks;
-            TaskName taskName = context.getTaskContext().getTaskModel().getTaskName();
-            LOGGER.info(String.format("Effective rate limit for task %s and tag %s is %f", taskName, tag,
-                effectiveRate));
-            if (effectiveRate < 1.0) {
-              LOGGER.warn(String.format("Effective limit rate (%f) is very low. "
-                              + "Total rate limit is %d while number of tasks is %d. Consider increasing the rate limit.",
-                        effectiveRate, e.getValue(), numTasks));
-            }
-            return new ImmutablePair<>(tag, com.google.common.util.concurrent.RateLimiter.create(effectiveRate));
-          })
+          String tag = e.getKey();
+          JobModel jobModel = ((TaskContextImpl) context.getTaskContext()).getJobModel();
+          int numTasks = jobModel.getContainers().values().stream()
+              .mapToInt(cm -> cm.getTasks().size())
+              .sum();
+          double effectiveRate = (double) e.getValue() / numTasks;
+          TaskName taskName = context.getTaskContext().getTaskModel().getTaskName();
+          LOGGER.info(String.format("Effective rate limit for task %s and tag %s is %f", taskName, tag,
+              effectiveRate));
+          if (effectiveRate < 1.0) {
+            LOGGER.warn(String.format("Effective limit rate (%f) is very low. "
+                            + "Total rate limit is %d while number of tasks is %d. Consider increasing the rate limit.",
+                      effectiveRate, e.getValue(), numTasks));
+          }
+          return new ImmutablePair<>(tag, com.google.common.util.concurrent.RateLimiter.create(effectiveRate));
+        })
         .collect(Collectors.toMap(ImmutablePair::getKey, ImmutablePair::getValue))
     );
     initialized = true;
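
Note: the init() hunk above divides a job-wide rate evenly across tasks before
building a Guava RateLimiter, and warns when the per-task share drops below
one credit per second. The arithmetic, in isolation (numbers are illustrative):

    import com.google.common.util.concurrent.RateLimiter;

    public class PerTaskRateExample {
      public static RateLimiter forTask(int jobWideRate, int numTasks) {
        double effectiveRate = (double) jobWideRate / numTasks; // e.g. 1000 / 8 = 125.0
        return RateLimiter.create(effectiveRate);
      }
    }
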
diff --git a/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java b/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java
index e15fe7f..1b94824 100644
--- a/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java
+++ b/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java
@@ -97,10 +97,10 @@ public class ReflectionUtil {
         Class<?>[] argClasses = new Class<?>[args.length];
         Object[] argValues = new Object[args.length];
         IntStream.range(0, args.length).forEach(i -> {
-            ConstructorArgument<?> constructorArgument = args[i];
-            argClasses[i] = constructorArgument.getClazz();
-            argValues[i] = constructorArgument.getValue();
-          });
+          ConstructorArgument<?> constructorArgument = args[i];
+          argClasses[i] = constructorArgument.getClazz();
+          argValues[i] = constructorArgument.getValue();
+        });
         Constructor<T> constructor = classObj.getDeclaredConstructor(argClasses);
         return constructor.newInstance(argValues);
       }
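
Note: the ReflectionUtil hunk splits constructor arguments into parallel
arrays of parameter types and values before the reflective call. The core of
that pattern, as a minimal sketch (names are illustrative):

    import java.lang.reflect.Constructor;

    public class ReflectiveConstructExample {
      public static <T> T construct(Class<T> clazz, Class<?>[] argClasses, Object[] argValues)
          throws ReflectiveOperationException {
        Constructor<T> constructor = clazz.getDeclaredConstructor(argClasses);
        return constructor.newInstance(argValues);
      }
    }
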
diff --git a/samza-core/src/main/java/org/apache/samza/util/Util.java b/samza-core/src/main/java/org/apache/samza/util/Util.java
index ad7b85c..f233c32 100644
--- a/samza-core/src/main/java/org/apache/samza/util/Util.java
+++ b/samza-core/src/main/java/org/apache/samza/util/Util.java
@@ -53,9 +53,9 @@ public class Util {
 
   public static String getSamzaVersion() {
     return Optional.ofNullable(Util.class.getPackage().getImplementationVersion()).orElseGet(() -> {
-        LOG.warn("Unable to find implementation samza version in jar's meta info. Defaulting to {}", FALLBACK_VERSION);
-        return FALLBACK_VERSION;
-      });
+      LOG.warn("Unable to find implementation samza version in jar's meta info. Defaulting to {}", FALLBACK_VERSION);
+      return FALLBACK_VERSION;
+    });
   }
 
   public static String getTaskClassVersion(Config config) {
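
Note: getSamzaVersion() reads the jar manifest's Implementation-Version and
falls back to a default when it is absent. The same lookup in isolation (the
fallback constant is illustrative; like the original, this assumes the class
was loaded from a jar with package metadata):

    import java.util.Optional;

    public class VersionLookupExample {
      private static final String FALLBACK_VERSION = "0.0.1";

      public static String version() {
        return Optional.ofNullable(VersionLookupExample.class.getPackage().getImplementationVersion())
            .orElseGet(() -> FALLBACK_VERSION);
      }
    }
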
diff --git a/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java b/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java
index 51f5555..b951b6e 100644
--- a/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java
+++ b/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java
@@ -194,17 +194,17 @@ public class ZkBarrierForVersionUpgrade {
       // check if all the expected participants are in
       if (participantIds.size() == expectedParticipantIds.size() && CollectionUtils.containsAll(participantIds, expectedParticipantIds)) {
         debounceTimer.scheduleAfterDebounceTime(ACTION_NAME, 0, () -> {
-            String barrierStatePath = keyBuilder.getBarrierStatePath(barrierVersion);
-            State barrierState = State.valueOf(zkUtils.getZkClient().readData(barrierStatePath));
-            if (Objects.equals(barrierState, State.NEW)) {
-              LOG.info(String.format("Expected participants has joined the barrier version: %s. Marking the barrier state: %s as %s.", barrierVersion, barrierStatePath, State.DONE));
-              zkUtils.writeData(barrierStatePath, State.DONE.toString()); // this will trigger notifications
-            } else {
-              LOG.debug(String.format("Barrier version: %s is at: %s state. Not marking barrier as %s.", barrierVersion, barrierState, State.DONE));
-            }
-            LOG.info("Unsubscribing child changes on the path: {} for barrier version: {}.", barrierParticipantPath, barrierVersion);
-            zkUtils.unsubscribeChildChanges(barrierParticipantPath, this);
-          });
+          String barrierStatePath = keyBuilder.getBarrierStatePath(barrierVersion);
+          State barrierState = State.valueOf(zkUtils.getZkClient().readData(barrierStatePath));
+          if (Objects.equals(barrierState, State.NEW)) {
+            LOG.info(String.format("Expected participants has joined the barrier version: %s. Marking the barrier state: %s as %s.", barrierVersion, barrierStatePath, State.DONE));
+            zkUtils.writeData(barrierStatePath, State.DONE.toString()); // this will trigger notifications
+          } else {
+            LOG.debug(String.format("Barrier version: %s is at: %s state. Not marking barrier as %s.", barrierVersion, barrierState, State.DONE));
+          }
+          LOG.info("Unsubscribing child changes on the path: {} for barrier version: {}.", barrierParticipantPath, barrierVersion);
+          zkUtils.unsubscribeChildChanges(barrierParticipantPath, this);
+        });
       }
     }
   }
diff --git a/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java b/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java
index 86c1f06..e03ce8b 100644
--- a/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java
+++ b/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java
@@ -142,9 +142,9 @@ public class ZkJobCoordinator implements JobCoordinator {
     this.debounceTimeMs = new JobConfig(config).getDebounceTimeMs();
     debounceTimer = new ScheduleAfterDebounceTime(processorId);
     debounceTimer.setScheduledTaskCallback(throwable -> {
-        LOG.error("Received exception in debounce timer! Stopping the job coordinator", throwable);
-        stop();
-      });
+      LOG.error("Received exception in debounce timer! Stopping the job coordinator", throwable);
+      stop();
+    });
     this.barrier =  new ZkBarrierForVersionUpgrade(zkUtils.getKeyBuilder().getJobModelVersionBarrierPrefix(), zkUtils, new ZkBarrierListenerImpl(), debounceTimer);
     systemAdmins = new SystemAdmins(config);
     streamMetadataCache = new StreamMetadataCache(systemAdmins, METADATA_CACHE_TTL_MS, SystemClock.instance());
@@ -375,11 +375,11 @@ public class ZkJobCoordinator implements JobCoordinator {
     Set<SystemStream> inputStreamsToMonitor = new TaskConfig(config).getAllInputStreams();
 
     return new StreamPartitionCountMonitor(
-            inputStreamsToMonitor,
-            streamMetadata,
-            metrics.getMetricsRegistry(),
-            new JobConfig(config).getMonitorPartitionChangeFrequency(),
-            streamsChanged -> {
+        inputStreamsToMonitor,
+        streamMetadata,
+        metrics.getMetricsRegistry(),
+        new JobConfig(config).getMonitorPartitionChangeFrequency(),
+      streamsChanged -> {
         if (leaderElector.amILeader()) {
           debounceTimer.scheduleAfterDebounceTime(ON_PROCESSOR_CHANGE, 0, this::doOnProcessorChange);
         }
@@ -466,20 +466,20 @@ public class ZkJobCoordinator implements JobCoordinator {
       metrics.singleBarrierRebalancingTime.update(System.nanoTime() - startTime);
       if (ZkBarrierForVersionUpgrade.State.DONE.equals(state)) {
         debounceTimer.scheduleAfterDebounceTime(barrierAction, 0, () -> {
-            LOG.info("pid=" + processorId + "new version " + version + " of the job model got confirmed");
-
-            // read the new Model
-            JobModel jobModel = getJobModel();
-            // start the container with the new model
-            if (coordinatorListener != null) {
-              for (ContainerModel containerModel : jobModel.getContainers().values()) {
-                for (TaskName taskName : containerModel.getTasks().keySet()) {
-                  zkUtils.writeTaskLocality(taskName, locationId);
-                }
+          LOG.info("pid=" + processorId + "new version " + version + " of the job model got confirmed");
+
+          // read the new Model
+          JobModel jobModel = getJobModel();
+          // start the container with the new model
+          if (coordinatorListener != null) {
+            for (ContainerModel containerModel : jobModel.getContainers().values()) {
+              for (TaskName taskName : containerModel.getTasks().keySet()) {
+                zkUtils.writeTaskLocality(taskName, locationId);
               }
-              coordinatorListener.onNewJobModel(processorId, jobModel);
             }
-          });
+            coordinatorListener.onNewJobModel(processorId, jobModel);
+          }
+        });
       } else {
         if (ZkBarrierForVersionUpgrade.State.TIMED_OUT.equals(state)) {
           // no-op for non-leaders
@@ -539,26 +539,26 @@ public class ZkJobCoordinator implements JobCoordinator {
     @Override
     public void doHandleDataChange(String dataPath, Object data) {
       debounceTimer.scheduleAfterDebounceTime(JOB_MODEL_VERSION_CHANGE, 0, () -> {
-          String jobModelVersion = (String) data;
+        String jobModelVersion = (String) data;
 
-          LOG.info("Got a notification for new JobModel version. Path = {} Version = {}", dataPath, data);
+        LOG.info("Got a notification for new JobModel version. Path = {} Version = {}", dataPath, data);
 
-          newJobModel = readJobModelFromMetadataStore(jobModelVersion);
-          LOG.info("pid=" + processorId + ": new JobModel is available. Version =" + jobModelVersion + "; JobModel = " + newJobModel);
+        newJobModel = readJobModelFromMetadataStore(jobModelVersion);
+        LOG.info("pid=" + processorId + ": new JobModel is available. Version =" + jobModelVersion + "; JobModel = " + newJobModel);
 
-          if (!newJobModel.getContainers().containsKey(processorId)) {
-            LOG.info("New JobModel does not contain pid={}. Stopping this processor. New JobModel: {}",
-                processorId, newJobModel);
-            stop();
-          } else {
-            // stop current work
-            if (coordinatorListener != null) {
-              coordinatorListener.onJobModelExpired();
-            }
-            // update ZK and wait for all the processors to get this new version
-            barrier.join(jobModelVersion, processorId);
+        if (!newJobModel.getContainers().containsKey(processorId)) {
+          LOG.info("New JobModel does not contain pid={}. Stopping this processor. New JobModel: {}",
+              processorId, newJobModel);
+          stop();
+        } else {
+          // stop current work
+          if (coordinatorListener != null) {
+            coordinatorListener.onJobModelExpired();
           }
-        });
+          // update ZK and wait for all the processors to get this new version
+          barrier.join(jobModelVersion, processorId);
+        }
+      });
     }
 
     @Override
@@ -607,10 +607,10 @@ public class ZkJobCoordinator implements JobCoordinator {
           LOG.info("Cancelling all scheduled actions in session expiration for processorId: {}.", processorId);
           debounceTimer.cancelAllScheduledActions();
           debounceTimer.scheduleAfterDebounceTime(ZK_SESSION_EXPIRED, 0, () -> {
-              if (coordinatorListener != null) {
-                coordinatorListener.onJobModelExpired();
-              }
-            });
+            if (coordinatorListener != null) {
+              coordinatorListener.onJobModelExpired();
+            }
+          });
 
           return;
         case Disconnected:
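
Note: ZkJobCoordinator funnels every reaction (new job model, barrier
completion, session expiry) through a debounce timer so that bursts of ZK
notifications collapse into one action. A generic debounce sketch, which is
NOT Samza's ScheduleAfterDebounceTime but illustrates the idea of keeping only
the latest request per named action:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class DebounceExample {
      private final ScheduledExecutorService scheduler =
          Executors.newSingleThreadScheduledExecutor();
      private final ConcurrentMap<String, ScheduledFuture<?>> pending =
          new ConcurrentHashMap<>();

      // Simplified: a real implementation would also guard the window
      // between put and cancel.
      public void scheduleAfterDebounce(String action, long delayMs, Runnable task) {
        ScheduledFuture<?> previous =
            pending.put(action, scheduler.schedule(task, delayMs, TimeUnit.MILLISECONDS));
        if (previous != null) {
          previous.cancel(false);
        }
      }
    }
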
diff --git a/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java b/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java
index 3087eb4..19411b4 100644
--- a/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java
+++ b/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java
@@ -300,12 +300,12 @@ public class ContainerStorageManager {
     Map<TaskName, Map<String, Set<SystemStreamPartition>>> taskSideInputSSPs = new HashMap<>();
 
     containerModel.getTasks().forEach((taskName, taskModel) -> {
-        taskSideInputSSPs.putIfAbsent(taskName, new HashMap<>());
-        sideInputSystemStreams.keySet().forEach(storeName -> {
-            Set<SystemStreamPartition> taskSideInputs = taskModel.getSystemStreamPartitions().stream().filter(ssp -> sideInputSystemStreams.get(storeName).contains(ssp.getSystemStream())).collect(Collectors.toSet());
-            taskSideInputSSPs.get(taskName).put(storeName, taskSideInputs);
-          });
+      taskSideInputSSPs.putIfAbsent(taskName, new HashMap<>());
+      sideInputSystemStreams.keySet().forEach(storeName -> {
+        Set<SystemStreamPartition> taskSideInputs = taskModel.getSystemStreamPartitions().stream().filter(ssp -> sideInputSystemStreams.get(storeName).contains(ssp.getSystemStream())).collect(Collectors.toSet());
+        taskSideInputSSPs.get(taskName).put(storeName, taskSideInputs);
       });
+    });
     return taskSideInputSSPs;
   }
 
@@ -329,13 +329,13 @@ public class ContainerStorageManager {
     );
 
     getTasks(containerModel, TaskMode.Standby).forEach((taskName, taskModel) -> {
-        this.taskSideInputStoreSSPs.putIfAbsent(taskName, new HashMap<>());
-        changelogSystemStreams.forEach((storeName, systemStream) -> {
-            SystemStreamPartition ssp = new SystemStreamPartition(systemStream, taskModel.getChangelogPartition());
-            changelogSSPToStore.remove(ssp);
-            this.taskSideInputStoreSSPs.get(taskName).put(storeName, Collections.singleton(ssp));
-          });
+      this.taskSideInputStoreSSPs.putIfAbsent(taskName, new HashMap<>());
+      changelogSystemStreams.forEach((storeName, systemStream) -> {
+        SystemStreamPartition ssp = new SystemStreamPartition(systemStream, taskModel.getChangelogPartition());
+        changelogSSPToStore.remove(ssp);
+        this.taskSideInputStoreSSPs.get(taskName).put(storeName, Collections.singleton(ssp));
       });
+    });
 
     // changelogSystemStreams correspond only to active tasks (since those of standby-tasks moved to sideInputs above)
     return MapUtils.invertMap(changelogSSPToStore).entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, x -> x.getValue().getSystemStream()));
@@ -377,13 +377,13 @@ public class ContainerStorageManager {
   private Map<TaskName, TaskRestoreManager> createTaskRestoreManagers(SystemAdmins systemAdmins, Clock clock, SamzaContainerMetrics samzaContainerMetrics) {
     Map<TaskName, TaskRestoreManager> taskRestoreManagers = new HashMap<>();
     containerModel.getTasks().forEach((taskName, taskModel) -> {
-        taskRestoreManagers.put(taskName,
-            TaskRestoreManagerFactory.create(
-                taskModel, changelogSystemStreams, getNonSideInputStores(taskName), systemAdmins,
-                streamMetadataCache, sspMetadataCache, storeConsumers, maxChangeLogStreamPartitions,
-                loggedStoreBaseDirectory, nonLoggedStoreBaseDirectory, config, clock));
-        samzaContainerMetrics.addStoresRestorationGauge(taskName);
-      });
+      taskRestoreManagers.put(taskName,
+          TaskRestoreManagerFactory.create(
+              taskModel, changelogSystemStreams, getNonSideInputStores(taskName), systemAdmins,
+              streamMetadataCache, sspMetadataCache, storeConsumers, maxChangeLogStreamPartitions,
+              loggedStoreBaseDirectory, nonLoggedStoreBaseDirectory, config, clock));
+      samzaContainerMetrics.addStoresRestorationGauge(taskName);
+    });
     return taskRestoreManagers;
   }
 
@@ -535,57 +535,57 @@ public class ContainerStorageManager {
 
     Map<TaskName, Map<String, SideInputsProcessor>> sideInputStoresToProcessors = new HashMap<>();
     containerModel.getTasks().forEach((taskName, taskModel) -> {
-        sideInputStoresToProcessors.put(taskName, new HashMap<>());
-        TaskMode taskMode = taskModel.getTaskMode();
-
-        for (String storeName : this.taskSideInputStoreSSPs.get(taskName).keySet()) {
-
-          SideInputsProcessor sideInputsProcessor;
-          Optional<String> sideInputsProcessorSerializedInstance =
-              config.getSideInputsProcessorSerializedInstance(storeName);
-
-          if (sideInputsProcessorSerializedInstance.isPresent()) {
-
-            sideInputsProcessor = SerdeUtils.deserialize("Side Inputs Processor", sideInputsProcessorSerializedInstance.get());
-            LOG.info("Using serialized side-inputs-processor for store: {}, task: {}", storeName, taskName);
-
-          } else if (config.getSideInputsProcessorFactory(storeName).isPresent()) {
-            String sideInputsProcessorFactoryClassName = config.getSideInputsProcessorFactory(storeName).get();
-            SideInputsProcessorFactory sideInputsProcessorFactory =
-                ReflectionUtil.getObj(sideInputsProcessorFactoryClassName, SideInputsProcessorFactory.class);
-            sideInputsProcessor = sideInputsProcessorFactory.getSideInputsProcessor(config, taskInstanceMetrics.get(taskName).registry());
-            LOG.info("Using side-inputs-processor from factory: {} for store: {}, task: {}", config.getSideInputsProcessorFactory(storeName).get(), storeName, taskName);
-
-          } else {
-            // if this is a active-task with a side-input store but no sideinput-processor-factory defined in config, we rely on upstream validations to fail the deploy
-
-            // if this is a standby-task and the store is a non-side-input changelog store
-            // we creating identity sideInputProcessor for stores of standbyTasks
-            // have to use the right serde because the sideInput stores are created
-
-            Serde keySerde = serdes.get(config.getStorageKeySerde(storeName)
-                .orElseThrow(() -> new SamzaException("Could not find storage key serde for store: " + storeName)));
-            Serde msgSerde = serdes.get(config.getStorageMsgSerde(storeName)
-                .orElseThrow(() -> new SamzaException("Could not find storage msg serde for store: " + storeName)));
-            sideInputsProcessor = new SideInputsProcessor() {
-              @Override
-              public Collection<Entry<?, ?>> process(IncomingMessageEnvelope message, KeyValueStore store) {
-                // Ignore message if the key is null
-                if (message.getKey() == null) {
-                  return ImmutableList.of();
-                } else {
-                  // Skip serde if the message is null
-                  return ImmutableList.of(new Entry<>(keySerde.fromBytes((byte[]) message.getKey()),
-                      message.getMessage() == null ? null : msgSerde.fromBytes((byte[]) message.getMessage())));
-                }
-              }
-            };
-            LOG.info("Using identity side-inputs-processor for store: {}, task: {}", storeName, taskName);
-          }
+      sideInputStoresToProcessors.put(taskName, new HashMap<>());
+      TaskMode taskMode = taskModel.getTaskMode();
+
+      for (String storeName : this.taskSideInputStoreSSPs.get(taskName).keySet()) {
+
+        SideInputsProcessor sideInputsProcessor;
+        Optional<String> sideInputsProcessorSerializedInstance =
+            config.getSideInputsProcessorSerializedInstance(storeName);
 
-          sideInputStoresToProcessors.get(taskName).put(storeName, sideInputsProcessor);
+        if (sideInputsProcessorSerializedInstance.isPresent()) {
+
+          sideInputsProcessor = SerdeUtils.deserialize("Side Inputs Processor", sideInputsProcessorSerializedInstance.get());
+          LOG.info("Using serialized side-inputs-processor for store: {}, task: {}", storeName, taskName);
+
+        } else if (config.getSideInputsProcessorFactory(storeName).isPresent()) {
+          String sideInputsProcessorFactoryClassName = config.getSideInputsProcessorFactory(storeName).get();
+          SideInputsProcessorFactory sideInputsProcessorFactory =
+              ReflectionUtil.getObj(sideInputsProcessorFactoryClassName, SideInputsProcessorFactory.class);
+          sideInputsProcessor = sideInputsProcessorFactory.getSideInputsProcessor(config, taskInstanceMetrics.get(taskName).registry());
+          LOG.info("Using side-inputs-processor from factory: {} for store: {}, task: {}", config.getSideInputsProcessorFactory(storeName).get(), storeName, taskName);
+
+        } else {
+          // if this is a active-task with a side-input store but no sideinput-processor-factory defined in config, we rely on upstream validations to fail the deploy
+
+          // if this is a standby-task and the store is a non-side-input changelog store
+          // we creating identity sideInputProcessor for stores of standbyTasks
+          // have to use the right serde because the sideInput stores are created
+
+          Serde keySerde = serdes.get(config.getStorageKeySerde(storeName)
+              .orElseThrow(() -> new SamzaException("Could not find storage key serde for store: " + storeName)));
+          Serde msgSerde = serdes.get(config.getStorageMsgSerde(storeName)
+              .orElseThrow(() -> new SamzaException("Could not find storage msg serde for store: " + storeName)));
+          sideInputsProcessor = new SideInputsProcessor() {
+            @Override
+            public Collection<Entry<?, ?>> process(IncomingMessageEnvelope message, KeyValueStore store) {
+              // Ignore message if the key is null
+              if (message.getKey() == null) {
+                return ImmutableList.of();
+              } else {
+                // Skip serde if the message is null
+                return ImmutableList.of(new Entry<>(keySerde.fromBytes((byte[]) message.getKey()),
+                    message.getMessage() == null ? null : msgSerde.fromBytes((byte[]) message.getMessage())));
+              }
+            }
+          };
+          LOG.info("Using identity side-inputs-processor for store: {}, task: {}", storeName, taskName);
         }
-      });
+
+        sideInputStoresToProcessors.get(taskName).put(storeName, sideInputsProcessor);
+      }
+    });
 
     return sideInputStoresToProcessors;
   }
@@ -601,31 +601,31 @@ public class ContainerStorageManager {
     if (this.hasSideInputs) {
       containerModel.getTasks().forEach((taskName, taskModel) -> {
 
-          Map<String, StorageEngine> sideInputStores = getSideInputStores(taskName);
-          Map<String, Set<SystemStreamPartition>> sideInputStoresToSSPs = new HashMap<>();
+        Map<String, StorageEngine> sideInputStores = getSideInputStores(taskName);
+        Map<String, Set<SystemStreamPartition>> sideInputStoresToSSPs = new HashMap<>();
 
-          for (String storeName : sideInputStores.keySet()) {
-            Set<SystemStreamPartition> storeSSPs = this.taskSideInputStoreSSPs.get(taskName).get(storeName);
-            sideInputStoresToSSPs.put(storeName, storeSSPs);
-          }
-
-          TaskSideInputHandler taskSideInputHandler = new TaskSideInputHandler(taskName,
-              taskModel.getTaskMode(),
-              loggedStoreBaseDirectory,
-              sideInputStores,
-              sideInputStoresToSSPs,
-              taskSideInputProcessors.get(taskName),
-              this.systemAdmins,
-              this.streamMetadataCache,
-              clock);
-
-          sideInputStoresToSSPs.values().stream().flatMap(Set::stream).forEach(ssp -> {
-              handlers.put(ssp, taskSideInputHandler);
-            });
+        for (String storeName : sideInputStores.keySet()) {
+          Set<SystemStreamPartition> storeSSPs = this.taskSideInputStoreSSPs.get(taskName).get(storeName);
+          sideInputStoresToSSPs.put(storeName, storeSSPs);
+        }
 
-          LOG.info("Created TaskSideInputHandler for task {}, sideInputStores {} and loggedStoreBaseDirectory {}",
-              taskName, sideInputStores, loggedStoreBaseDirectory);
+        TaskSideInputHandler taskSideInputHandler = new TaskSideInputHandler(taskName,
+            taskModel.getTaskMode(),
+            loggedStoreBaseDirectory,
+            sideInputStores,
+            sideInputStoresToSSPs,
+            taskSideInputProcessors.get(taskName),
+            this.systemAdmins,
+            this.streamMetadataCache,
+            clock);
+
+        sideInputStoresToSSPs.values().stream().flatMap(Set::stream).forEach(ssp -> {
+          handlers.put(ssp, taskSideInputHandler);
         });
+
+        LOG.info("Created TaskSideInputHandler for task {}, sideInputStores {} and loggedStoreBaseDirectory {}",
+            taskName, sideInputStores, loggedStoreBaseDirectory);
+      });
     }
     return handlers;
   }
@@ -648,18 +648,18 @@ public class ContainerStorageManager {
     Map<SystemStreamPartition, String> checkpointedChangelogSSPOffsets = new HashMap<>();
     if (new TaskConfig(config).getTransactionalStateRestoreEnabled()) {
       getTasks(containerModel, TaskMode.Active).forEach((taskName, taskModel) -> {
-          if (checkpointManager != null) {
-            Set<SystemStream> changelogSystemStreams = new HashSet<>(this.changelogSystemStreams.values());
-            Checkpoint checkpoint = checkpointManager.readLastCheckpoint(taskName);
-            if (checkpoint != null) {
-              checkpoint.getOffsets().forEach((ssp, offset) -> {
-                  if (changelogSystemStreams.contains(new SystemStream(ssp.getSystem(), ssp.getStream()))) {
-                    checkpointedChangelogSSPOffsets.put(ssp, offset);
-                  }
-                });
-            }
+        if (checkpointManager != null) {
+          Set<SystemStream> changelogSystemStreams = new HashSet<>(this.changelogSystemStreams.values());
+          Checkpoint checkpoint = checkpointManager.readLastCheckpoint(taskName);
+          if (checkpoint != null) {
+            checkpoint.getOffsets().forEach((ssp, offset) -> {
+              if (changelogSystemStreams.contains(new SystemStream(ssp.getSystem(), ssp.getStream()))) {
+                checkpointedChangelogSSPOffsets.put(ssp, offset);
+              }
+            });
           }
-        });
+        }
+      });
     }
     LOG.info("Checkpointed changelog ssp offsets: {}", checkpointedChangelogSSPOffsets);
     restoreStores(checkpointedChangelogSSPOffsets);
@@ -688,9 +688,9 @@ public class ContainerStorageManager {
 
     // Submit restore callable for each taskInstance
     this.taskRestoreManagers.forEach((taskInstance, taskRestoreManager) -> {
-        taskRestoreFutures.add(executorService.submit(
-            new TaskRestoreCallable(this.samzaContainerMetrics, taskInstance, taskRestoreManager)));
-      });
+      taskRestoreFutures.add(executorService.submit(
+          new TaskRestoreCallable(this.samzaContainerMetrics, taskInstance, taskRestoreManager)));
+    });
 
     // loop-over the future list to wait for each thread to finish, catch any exceptions during restore and throw
     // as samza exceptions
@@ -778,26 +778,26 @@ public class ContainerStorageManager {
 
     // submit the sideInput read runnable
       sideInputsReadExecutor.submit(() -> {
-          try {
-            while (!shouldShutdown) {
-              IncomingMessageEnvelope envelope = sideInputSystemConsumers.choose(true);
-
-              if (envelope != null) {
-                if (!envelope.isEndOfStream()) {
-                  this.sspSideInputHandlers.get(envelope.getSystemStreamPartition()).process(envelope);
-                }
+        try {
+          while (!shouldShutdown) {
+            IncomingMessageEnvelope envelope = sideInputSystemConsumers.choose(true);
 
-                checkSideInputCaughtUp(envelope.getSystemStreamPartition(), envelope.getOffset(),
-                    SystemStreamMetadata.OffsetType.NEWEST, envelope.isEndOfStream());
-              } else {
-                LOG.trace("No incoming message was available");
+            if (envelope != null) {
+              if (!envelope.isEndOfStream()) {
+                this.sspSideInputHandlers.get(envelope.getSystemStreamPartition()).process(envelope);
               }
+
+              checkSideInputCaughtUp(envelope.getSystemStreamPartition(), envelope.getOffset(),
+                  SystemStreamMetadata.OffsetType.NEWEST, envelope.isEndOfStream());
+            } else {
+              LOG.trace("No incoming message was available");
             }
-          } catch (Exception e) {
-            LOG.error("Exception in reading sideInputs", e);
-            sideInputException = e;
           }
-        });
+        } catch (Exception e) {
+          LOG.error("Exception in reading sideInputs", e);
+          sideInputException = e;
+        }
+      });
 
       // Make the main thread wait until all sideInputs have been caughtup or an exception was thrown
       while (!shouldShutdown && sideInputException == null &&
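
Note: restoreStores() above submits one restore callable per task and then
joins the futures so any restore failure surfaces on the main thread. The
submit-then-wait skeleton, detached from Samza types (pool size and names are
illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class ParallelRestoreExample {
      public static void restoreAll(List<Callable<Void>> restoreCallables) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Void>> futures = new ArrayList<>();
        restoreCallables.forEach(c -> futures.add(pool.submit(c)));
        try {
          for (Future<Void> f : futures) {
            f.get(); // rethrows any restore exception as ExecutionException
          }
        } finally {
          pool.shutdown();
        }
      }
    }
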
diff --git a/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java b/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java
index d1fab98..b4fc75c 100644
--- a/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java
+++ b/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java
@@ -87,8 +87,8 @@ public class TestStreamApplicationDescriptorImpl {
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mockValueSerde);
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -110,8 +110,8 @@ public class TestStreamApplicationDescriptorImpl {
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mockKVSerde);
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -126,8 +126,8 @@ public class TestStreamApplicationDescriptorImpl {
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor("mockStreamId", null);
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
   }
 
   @Test
@@ -138,8 +138,8 @@ public class TestStreamApplicationDescriptorImpl {
     MockTransformingSystemDescriptor sd = new MockTransformingSystemDescriptor("mockSystem", transformer);
     MockInputDescriptor isd = sd.getInputDescriptor(streamId, mockValueSerde);
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -164,8 +164,8 @@ public class TestStreamApplicationDescriptorImpl {
     MockExpandingSystemDescriptor sd = new MockExpandingSystemDescriptor("mock-system", expander);
     MockInputDescriptor isd = sd.getInputDescriptor(streamId, new IntegerSerde());
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(expandedStreamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -182,8 +182,8 @@ public class TestStreamApplicationDescriptorImpl {
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mock(Serde.class));
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -200,9 +200,9 @@ public class TestStreamApplicationDescriptorImpl {
     GenericInputDescriptor isd2 = sd.getInputDescriptor(streamId2, mock(Serde.class));
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd1);
-        appDesc.getInputStream(isd2);
-      }, getConfig());
+      appDesc.getInputStream(isd1);
+      appDesc.getInputStream(isd2);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec1 = streamAppDesc.getInputOperators().get(streamId1);
     InputOperatorSpec inputOpSpec2 = streamAppDesc.getInputOperators().get(streamId2);
@@ -222,10 +222,10 @@ public class TestStreamApplicationDescriptorImpl {
     GenericInputDescriptor isd1 = sd.getInputDescriptor(streamId, mock(Serde.class));
     GenericInputDescriptor isd2 = sd.getInputDescriptor(streamId, mock(Serde.class));
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd1);
-        // should throw exception
-        appDesc.getInputStream(isd2);
-      }, getConfig());
+      appDesc.getInputStream(isd1);
+      // should throw exception
+      appDesc.getInputStream(isd2);
+    }, getConfig());
   }
 
   @Test
@@ -237,25 +237,25 @@ public class TestStreamApplicationDescriptorImpl {
     GenericOutputDescriptor osd1 = sd2.getOutputDescriptor("test-stream-3", mock(Serde.class));
 
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd1);
-        try {
-          appDesc.getInputStream(isd2);
-          fail("Adding input stream with the same system name but different SystemDescriptor should have failed");
-        } catch (IllegalStateException e) { }
+      appDesc.getInputStream(isd1);
+      try {
+        appDesc.getInputStream(isd2);
+        fail("Adding input stream with the same system name but different SystemDescriptor should have failed");
+      } catch (IllegalStateException e) { }
 
-        try {
-          appDesc.getOutputStream(osd1);
-          fail("adding output stream with the same system name but different SystemDescriptor should have failed");
-        } catch (IllegalStateException e) { }
-      }, getConfig());
+      try {
+        appDesc.getOutputStream(osd1);
+        fail("adding output stream with the same system name but different SystemDescriptor should have failed");
+      } catch (IllegalStateException e) { }
+    }, getConfig());
 
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.withDefaultSystem(sd2);
-        try {
-          appDesc.getInputStream(isd1);
-          fail("Adding input stream with the same system name as the default system but different SystemDescriptor should have failed");
-        } catch (IllegalStateException e) { }
-      }, getConfig());
+      appDesc.withDefaultSystem(sd2);
+      try {
+        appDesc.getInputStream(isd1);
+        fail("Adding input stream with the same system name as the default system but different SystemDescriptor should have failed");
+      } catch (IllegalStateException e) { }
+    }, getConfig());
   }
 
   @Test
@@ -270,8 +270,8 @@ public class TestStreamApplicationDescriptorImpl {
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, mockKVSerde);
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+    }, getConfig());
 
     OutputStreamImpl<TestMessageEnvelope> outputStreamImpl = streamAppDesc.getOutputStreams().get(streamId);
     assertEquals(streamId, outputStreamImpl.getStreamId());
@@ -286,8 +286,8 @@ public class TestStreamApplicationDescriptorImpl {
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, null);
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+    }, getConfig());
   }
 
   @Test
@@ -298,8 +298,8 @@ public class TestStreamApplicationDescriptorImpl {
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, mockValueSerde);
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+    }, getConfig());
 
     OutputStreamImpl<TestMessageEnvelope> outputStreamImpl = streamAppDesc.getOutputStreams().get(streamId);
     assertEquals(streamId, outputStreamImpl.getStreamId());
@@ -315,9 +315,9 @@ public class TestStreamApplicationDescriptorImpl {
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mock(Serde.class));
 
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-        appDesc.withDefaultSystem(sd); // should throw exception
-      }, getConfig());
+      appDesc.getInputStream(isd);
+      appDesc.withDefaultSystem(sd); // should throw exception
+    }, getConfig());
   }
 
   @Test(expected = IllegalStateException.class)
@@ -326,9 +326,9 @@ public class TestStreamApplicationDescriptorImpl {
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, mock(Serde.class));
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-        appDesc.withDefaultSystem(sd); // should throw exception
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+      appDesc.withDefaultSystem(sd); // should throw exception
+    }, getConfig());
   }
 
   @Test(expected = IllegalStateException.class)
@@ -346,9 +346,9 @@ public class TestStreamApplicationDescriptorImpl {
     GenericOutputDescriptor osd1 = sd.getOutputDescriptor(streamId, mock(Serde.class));
     GenericOutputDescriptor osd2 = sd.getOutputDescriptor(streamId, mock(Serde.class));
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd1);
-        appDesc.getOutputStream(osd2); // should throw exception
-      }, getConfig());
+      appDesc.getOutputStream(osd1);
+      appDesc.getOutputStream(osd2); // should throw exception
+    }, getConfig());
   }
 
   @Test
@@ -497,10 +497,10 @@ public class TestStreamApplicationDescriptorImpl {
 
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(sd.getInputDescriptor(testStreamId1, mock(Serde.class)));
-        appDesc.getInputStream(sd.getInputDescriptor(testStreamId2, mock(Serde.class)));
-        appDesc.getInputStream(sd.getInputDescriptor(testStreamId3, mock(Serde.class)));
-      }, mockConfig);
+      appDesc.getInputStream(sd.getInputDescriptor(testStreamId1, mock(Serde.class)));
+      appDesc.getInputStream(sd.getInputDescriptor(testStreamId2, mock(Serde.class)));
+      appDesc.getInputStream(sd.getInputDescriptor(testStreamId3, mock(Serde.class)));
+    }, mockConfig);
 
     List<InputOperatorSpec> inputSpecs = new ArrayList<>(streamAppDesc.getInputOperators().values());
     assertEquals(inputSpecs.size(), 3);
@@ -518,8 +518,8 @@ public class TestStreamApplicationDescriptorImpl {
     when(mockTableDescriptor.getTableId()).thenReturn(tableId);
     AtomicReference<TableImpl> table = new AtomicReference<>();
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        table.set((TableImpl) appDesc.getTable(mockTableDescriptor));
-      }, mockConfig);
+      table.set((TableImpl) appDesc.getTable(mockTableDescriptor));
+    }, mockConfig);
     assertEquals(tableId, table.get().getTableId());
   }
 
@@ -567,10 +567,10 @@ public class TestStreamApplicationDescriptorImpl {
   public void testGetTableWithBadId() {
     Config mockConfig = getConfig();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        BaseTableDescriptor mockTableDescriptor = mock(BaseTableDescriptor.class);
-        when(mockTableDescriptor.getTableId()).thenReturn("my.table");
-        appDesc.getTable(mockTableDescriptor);
-      }, mockConfig);
+      BaseTableDescriptor mockTableDescriptor = mock(BaseTableDescriptor.class);
+      when(mockTableDescriptor.getTableId()).thenReturn("my.table");
+      appDesc.getTable(mockTableDescriptor);
+    }, mockConfig);
   }
 
   private Config getConfig() {
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java
index 593ddb9..d5819eb 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java
@@ -375,10 +375,10 @@ public class TestContainerAllocatorWithHostAffinity {
         new ContainerManager(containerPlacementMetadataStore, state, mockClusterResourceManager, true, false);
     // Mock the callback from ClusterManager to add resources to the allocator
     doAnswer((InvocationOnMock invocation) -> {
-        SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
-        spyAllocator.addResource(resource);
-        return null;
-      }).when(mockCPM).onResourcesAvailable(anyList());
+      SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
+      spyAllocator.addResource(resource);
+      return null;
+    }).when(mockCPM).onResourcesAvailable(anyList());
 
     spyAllocator = Mockito.spy(
         new ContainerAllocator(mockClusterResourceManager, config, state, true, containerManager));
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java
index 9d55218..f9104bd 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java
@@ -281,10 +281,10 @@ public class TestContainerAllocatorWithoutHostAffinity {
         new ContainerAllocator(mockManager, config, state, false, spyContainerManager));
     // Mock the callback from ClusterManager to add resources to the allocator
     doAnswer((InvocationOnMock invocation) -> {
-        SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
-        spyAllocator.addResource(resource);
-        return null;
-      }).when(mockCPM).onResourcesAvailable(anyList());
+      SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
+      spyAllocator.addResource(resource);
+      return null;
+    }).when(mockCPM).onResourcesAvailable(anyList());
     // Request Resources
     spyAllocator.requestResources(containersToHostMapping);
     spyThread = new Thread(spyAllocator, "Container Allocator Thread");
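
Note: the test hunks in this and the following files all reformat the same
Mockito doAnswer stub: intercept a mocked callback and forward its first
argument to the component under test. A standalone sketch using the Mockito
1.x calls visible in the hunks (the listener interface and sink are
placeholders, not Samza test classes):

    import static org.mockito.Matchers.anyList;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.util.List;
    import java.util.function.Consumer;
    import org.mockito.invocation.InvocationOnMock;

    public class DoAnswerExample {
      interface ResourceListener {
        void onResourcesAvailable(List<String> resources);
      }

      public static ResourceListener stub(Consumer<String> sink) {
        ResourceListener listener = mock(ResourceListener.class);
        doAnswer((InvocationOnMock invocation) -> {
          String first = (String) invocation.getArgumentAt(0, List.class).get(0);
          sink.accept(first); // hand the resource to the component under test
          return null;
        }).when(listener).onResourcesAvailable(anyList());
        return listener;
      }
    }
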
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java
index 49b013d..0ec635d 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java
@@ -125,9 +125,9 @@ public class TestContainerPlacementActions {
   private JobModelManager getJobModelManagerWithHostAffinity(Map<String, String> containerIdToHost) {
     Map<String, Map<String, String>> localityMap = new HashMap<>();
     containerIdToHost.forEach((containerId, host) -> {
-        localityMap.put(containerId,
-            ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
-      });
+      localityMap.put(containerId,
+          ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
+    });
     LocalityManager mockLocalityManager = mock(LocalityManager.class);
     when(mockLocalityManager.readContainerLocality()).thenReturn(localityMap);
 
@@ -138,9 +138,9 @@ public class TestContainerPlacementActions {
   private JobModelManager getJobModelManagerWithHostAffinityWithStandby(Map<String, String> containerIdToHost) {
     Map<String, Map<String, String>> localityMap = new HashMap<>();
     containerIdToHost.forEach((containerId, host) -> {
-        localityMap.put(containerId,
-            ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
-      });
+      localityMap.put(containerId,
+          ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
+    });
     LocalityManager mockLocalityManager = mock(LocalityManager.class);
     when(mockLocalityManager.readContainerLocality()).thenReturn(localityMap);
     // Generate JobModel for standby containers
@@ -193,9 +193,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -203,9 +203,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -213,9 +213,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
@@ -287,9 +287,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -297,9 +297,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -307,9 +307,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
@@ -389,12 +389,12 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            List<SamzaResource> resources = (List<SamzaResource>) args[0];
-            if (resources.get(0).getHost().equals("host-1") || resources.get(0).getHost().equals("host-2")) {
-              cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-            }
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          List<SamzaResource> resources = (List<SamzaResource>) args[0];
+          if (resources.get(0).getHost().equals("host-1") || resources.get(0).getHost().equals("host-2")) {
+            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+          }
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -402,9 +402,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -467,9 +467,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -478,14 +478,14 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            SamzaResource host3Resource = (SamzaResource) args[0];
-            if (host3Resource.getHost().equals("host-3")) {
-              cpm.onStreamProcessorLaunchFailure(host3Resource, new Throwable("Custom Exception for Host-3"));
-            } else {
-              cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-            }
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          SamzaResource host3Resource = (SamzaResource) args[0];
+          if (host3Resource.getHost().equals("host-3")) {
+            cpm.onStreamProcessorLaunchFailure(host3Resource, new Throwable("Custom Exception for Host-3"));
+          } else {
+            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+          }
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -493,9 +493,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
@@ -689,14 +689,14 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            List<SamzaResource> resources = (List<SamzaResource>) args[0];
-            SamzaResource preferredResource = resources.get(0);
-            SamzaResource anyResource =
-                new SamzaResource(preferredResource.getNumCores(), preferredResource.getMemoryMb(),
-                    "host-" + RandomStringUtils.randomAlphanumeric(5), preferredResource.getContainerId());
-            cpm.onResourcesAvailable(ImmutableList.of(anyResource));
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          List<SamzaResource> resources = (List<SamzaResource>) args[0];
+          SamzaResource preferredResource = resources.get(0);
+          SamzaResource anyResource =
+              new SamzaResource(preferredResource.getNumCores(), preferredResource.getMemoryMb(),
+                  "host-" + RandomStringUtils.randomAlphanumeric(5), preferredResource.getContainerId());
+          cpm.onResourcesAvailable(ImmutableList.of(anyResource));
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -861,9 +861,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -871,9 +871,9 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -881,10 +881,10 @@ public class TestContainerPlacementActions {
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
-          return null;
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
+        return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
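Every hunk in TestContainerPlacementActions above is whitespace-only. The upgraded
checkstyle indents a multi-line lambda body two spaces past the statement that opens
the lambda and puts the closing brace back at that statement's own indent, instead of
the deeper anonymous-class-style indent used before. A minimal self-contained sketch
of the enforced shape (the Callback interface and the names in it are hypothetical
stand-ins, not code from this patch):

    import static org.mockito.Matchers.anyList;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.util.Arrays;
    import java.util.List;

    public class LambdaIndentSketch {
      // Hypothetical stand-in for the cluster-manager callback stubbed in the tests above.
      interface Callback {
        void onResourcesAvailable(List<String> resources);
      }

      public static void main(String[] args) {
        Callback callback = mock(Callback.class);
        doAnswer(invocation -> {
          // the lambda body sits at the opening statement's indent + 2 ...
          Object[] invocationArgs = invocation.getArguments();
          System.out.println("received: " + invocationArgs[0]);
          return null;
        }).when(callback).onResourcesAvailable(anyList());
        // ... and the closing "})" returns to the statement's own indent.
        callback.onResourcesAvailable(Arrays.asList("resource-0"));
      }
    }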
 
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java
index 14771e7..a5dbe77 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java
@@ -112,8 +112,8 @@ public class TestContainerProcessManager {
   private JobModelManager getJobModelManagerWithHostAffinity(Map<String, String> containerIdToHost) {
     Map<String, Map<String, String>> localityMap = new HashMap<>();
     containerIdToHost.forEach((containerId, host) -> {
-        localityMap.put(containerId, ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
-      });
+      localityMap.put(containerId, ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
+    });
     LocalityManager mockLocalityManager = mock(LocalityManager.class);
     when(mockLocalityManager.readContainerLocality()).thenReturn(localityMap);
 
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java
index 459f39d..c5f3ec1 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java
@@ -74,9 +74,9 @@ public class TestStandbyAllocator {
           containerConstraints.contains(containerID));
 
       containerConstraints.forEach(containerConstraintID -> {
-          Assert.assertTrue("Constrained containers IDs should correspond to the active container",
-              containerID.split("-")[0].equals(containerConstraintID.split("-")[0]));
-        });
+        Assert.assertTrue("Constrained containers IDs should correspond to the active container",
+            containerID.split("-")[0].equals(containerConstraintID.split("-")[0]));
+      });
     }
   }
 
@@ -118,11 +118,11 @@ public class TestStandbyAllocator {
   private static Map<TaskName, TaskModel> getStandbyTasks(Map<TaskName, TaskModel> tasks, int replicaNum) {
     Map<TaskName, TaskModel> standbyTasks = new HashMap<>();
     tasks.forEach((taskName, taskModel) -> {
-        TaskName standbyTaskName = StandbyTaskUtil.getStandbyTaskName(taskName, replicaNum);
-        standbyTasks.put(standbyTaskName,
-            new TaskModel(standbyTaskName, taskModel.getSystemStreamPartitions(), taskModel.getChangelogPartition(),
-                TaskMode.Standby));
-      });
+      TaskName standbyTaskName = StandbyTaskUtil.getStandbyTaskName(taskName, replicaNum);
+      standbyTasks.put(standbyTaskName,
+          new TaskModel(standbyTaskName, taskModel.getSystemStreamPartitions(), taskModel.getChangelogPartition(),
+              TaskMode.Standby));
+    });
     return standbyTasks;
   }
 }
diff --git a/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java b/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java
index bbea19e..40ec38e 100644
--- a/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java
+++ b/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java
@@ -49,11 +49,11 @@ public class TestStreamConfig {
   public void testGetStreamMsgSerde() {
     String value = "my.msg.serde";
     doTestSamzaProperty(StreamConfig.MSG_SERDE, value,
-        (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamMsgSerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamMsgSerde(systemStream)));
     doTestSamzaProperty(StreamConfig.MSG_SERDE, "",
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.MSG_SERDE,
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getStreamMsgSerde);
   }
 
@@ -61,39 +61,39 @@ public class TestStreamConfig {
   public void testGetStreamKeySerde() {
     String value = "my.key.serde";
     doTestSamzaProperty(StreamConfig.KEY_SERDE, value,
-        (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamKeySerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamKeySerde(systemStream)));
     doTestSamzaProperty(StreamConfig.KEY_SERDE, "",
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.KEY_SERDE,
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getStreamKeySerde);
   }
 
   @Test
   public void testGetResetOffset() {
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "true",
-        (config, systemStream) -> assertTrue(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertTrue(config.getResetOffset(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "false",
-        (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "unknown_value",
-        (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_RESET_OFFSET,
-        (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getResetOffset);
   }
 
   @Test
   public void testIsResetOffsetConfigured() {
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "true",
-        (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "false",
-        (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "unknown_value",
-        (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_RESET_OFFSET,
-        (config, systemStream) -> assertFalse(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertFalse(config.isResetOffsetConfigured(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::isResetOffsetConfigured);
   }
 
@@ -101,12 +101,12 @@ public class TestStreamConfig {
   public void testGetDefaultStreamOffset() {
     String value = "my_offset_default";
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, value,
-        (config, systemStream) -> assertEquals(Optional.of(value), config.getDefaultStreamOffset(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(value), config.getDefaultStreamOffset(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, "",
-        (config, systemStream) -> assertEquals(Optional.of(""), config.getDefaultStreamOffset(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(""), config.getDefaultStreamOffset(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_OFFSET_DEFAULT,
-        (config, systemStream) -> assertEquals(Optional.empty(),
-            new StreamConfig(config).getDefaultStreamOffset(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(),
+          new StreamConfig(config).getDefaultStreamOffset(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getDefaultStreamOffset);
   }
 
@@ -114,52 +114,52 @@ public class TestStreamConfig {
   public void testIsDefaultStreamOffsetConfigured() {
     String value = "my_offset_default";
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, value,
-        (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, "",
-        (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_OFFSET_DEFAULT,
-        (config, systemStream) -> assertFalse(config.isDefaultStreamOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertFalse(config.isDefaultStreamOffsetConfigured(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::isDefaultStreamOffsetConfigured);
   }
 
   @Test
   public void testGetBootstrapEnabled() {
     doTestSamzaProperty(StreamConfig.BOOTSTRAP, "true",
-        (config, systemStream) -> assertTrue(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertTrue(config.getBootstrapEnabled(systemStream)));
     doTestSamzaProperty(StreamConfig.BOOTSTRAP, "false",
-        (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.BOOTSTRAP, "unknown_value",
-        (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.BOOTSTRAP,
-        (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getBootstrapEnabled);
   }
 
   @Test
   public void testGetBroadcastEnabled() {
     doTestSamzaProperty(StreamConfig.BROADCAST, "true",
-        (config, systemStream) -> assertTrue(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertTrue(config.getBroadcastEnabled(systemStream)));
     doTestSamzaProperty(StreamConfig.BROADCAST, "false",
-        (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.BROADCAST, "unknown_value",
-        (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.BROADCAST,
-        (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getBroadcastEnabled);
   }
 
   @Test
   public void testGetPriority() {
     doTestSamzaProperty(StreamConfig.PRIORITY, "0",
-        (config, systemStream) -> assertEquals(0, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(0, config.getPriority(systemStream)));
     doTestSamzaProperty(StreamConfig.PRIORITY, "100",
-        (config, systemStream) -> assertEquals(100, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(100, config.getPriority(systemStream)));
     doTestSamzaProperty(StreamConfig.PRIORITY, "-1",
-        (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.PRIORITY,
-        (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getPriority);
   }
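When the lambda itself begins on a continuation line, as in the doTestSamzaProperty
calls above, the hunks show the continuation indent dropping from four spaces to two
relative to the statement. Taken directly from the new lines of this patch:

    doTestSamzaProperty(StreamConfig.PRIORITY, "0",
      (config, systemStream) -> assertEquals(0, config.getPriority(systemStream)));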
 
diff --git a/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java b/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java
index 65701b3..2445c00 100644
--- a/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java
+++ b/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java
@@ -103,16 +103,16 @@ public class TestContainerHeartbeatMonitor {
     ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
     when(scheduler.scheduleAtFixedRate(any(), eq(0L), eq((long) ContainerHeartbeatMonitor.SCHEDULE_MS),
         eq(TimeUnit.MILLISECONDS))).thenAnswer(invocation -> {
-            Runnable command = invocation.getArgumentAt(0, Runnable.class);
-            (new Thread(() -> {
-                // just need to invoke the command once for these tests
-                command.run();
-                // notify that the execution is done, so verifications can begin
-                schedulerFixedRateExecutionLatch.countDown();
-              })).start();
-            // return value is not used by ContainerHeartbeatMonitor
-            return null;
-          });
+          Runnable command = invocation.getArgumentAt(0, Runnable.class);
+          (new Thread(() -> {
+            // just need to invoke the command once for these tests
+            command.run();
+            // notify that the execution is done, so verifications can begin
+            schedulerFixedRateExecutionLatch.countDown();
+          })).start();
+          // return value is not used by ContainerHeartbeatMonitor
+          return null;
+        });
     return scheduler;
   }
 }
diff --git a/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java b/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java
index 1ec718e..da855a1 100644
--- a/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java
+++ b/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java
@@ -84,8 +84,7 @@ public class TestRunLoop {
 
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
     when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope11).thenReturn(ssp0EndOfStream).thenReturn(ssp1EndOfStream).thenReturn(null);
     runLoop.run();
 
@@ -124,29 +123,29 @@ public class TestRunLoop {
     when(task0.offsetManager()).thenReturn(offsetManager);
     CountDownLatch firstMessageBarrier = new CountDownLatch(1);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
-        taskExecutor.submit(() -> {
-            firstMessageBarrier.await();
-            coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
-            coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
-            callback.complete();
-            return null;
-          });
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      taskExecutor.submit(() -> {
+        firstMessageBarrier.await();
+        coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+        coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+        callback.complete();
         return null;
-      }).when(task0).process(eq(envelope00), any(), any());
+      });
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
     doAnswer(invocation -> {
-        assertEquals(1, task0.metrics().messagesInFlight().getValue());
-        assertEquals(0, task0.metrics().asyncCallbackCompleted().getCount());
+      assertEquals(1, task0.metrics().messagesInFlight().getValue());
+      assertEquals(0, task0.metrics().asyncCallbackCompleted().getCount());
 
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
-        callback.complete();
-        firstMessageBarrier.countDown();
-        return null;
-      }).when(task0).process(eq(envelope01), any(), any());
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      callback.complete();
+      firstMessageBarrier.countDown();
+      return null;
+    }).when(task0).process(eq(envelope01), any(), any());
 
     Map<TaskName, RunLoopTask> tasks = new HashMap<>();
     tasks.put(taskName0, task0);
@@ -176,19 +175,18 @@ public class TestRunLoop {
 
     final AtomicInteger windowCount = new AtomicInteger(0);
     doAnswer(x -> {
-        windowCount.incrementAndGet();
-        if (windowCount.get() == 4) {
-          x.getArgumentAt(0, ReadableCoordinator.class).shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
-        }
-        return null;
-      }).when(task).window(any());
+      windowCount.incrementAndGet();
+      if (windowCount.get() == 4) {
+        x.getArgumentAt(0, ReadableCoordinator.class).shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+      }
+      return null;
+    }).when(task).window(any());
 
     Map<TaskName, RunLoopTask> tasks = new HashMap<>();
     tasks.put(taskName0, task);
 
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
     when(consumerMultiplexer.choose(false)).thenReturn(null);
     runLoop.run();
 
@@ -201,16 +199,16 @@ public class TestRunLoop {
 
     RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
 
-        coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
-        coordinator.shutdown(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
+      coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+      coordinator.shutdown(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
 
-        callback.complete();
-        return null;
-      }).when(task0).process(eq(envelope00), any(), any());
+      callback.complete();
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
     RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
 
@@ -239,16 +237,16 @@ public class TestRunLoop {
 
     RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
 
-        coordinator.commit(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
-        coordinator.shutdown(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
+      coordinator.commit(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
+      coordinator.shutdown(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
 
-        callback.complete();
-        return null;
-      }).when(task0).process(eq(envelope00), any(), any());
+      callback.complete();
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
     RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
 
@@ -278,33 +276,32 @@ public class TestRunLoop {
     int maxMessagesInFlight = 1;
     RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
 
-        TaskCallback callback = callbackFactory.createCallback();
-        coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
-        callback.complete();
-        return null;
-      }).when(task0).process(eq(envelope00), any(), any());
+      TaskCallback callback = callbackFactory.createCallback();
+      coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+      callback.complete();
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
     RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
 
-        TaskCallback callback = callbackFactory.createCallback();
-        coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
-        callback.complete();
-        return null;
-      }).when(task1).process(eq(envelope11), any(), any());
+      TaskCallback callback = callbackFactory.createCallback();
+      coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+      callback.complete();
+      return null;
+    }).when(task1).process(eq(envelope11), any(), any());
 
     Map<TaskName, RunLoopTask> tasks = new HashMap<>();
     tasks.put(taskName0, task0);
     tasks.put(taskName1, task1);
 
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
     // consensus is reached after envelope1 is processed.
     when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope11).thenReturn(null);
     runLoop.run();
@@ -330,8 +327,7 @@ public class TestRunLoop {
 
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
     when(consumerMultiplexer.choose(false))
       .thenReturn(envelope00)
       .thenReturn(envelope11)
@@ -361,32 +357,32 @@ public class TestRunLoop {
     when(task0.offsetManager()).thenReturn(offsetManager);
     CountDownLatch firstMessageBarrier = new CountDownLatch(2);
     doAnswer(invocation -> {
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
-        taskExecutor.submit(() -> {
-            firstMessageBarrier.await();
-            callback.complete();
-            return null;
-          });
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      taskExecutor.submit(() -> {
+        firstMessageBarrier.await();
+        callback.complete();
         return null;
-      }).when(task0).process(eq(envelope00), any(), any());
+      });
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
     doAnswer(invocation -> {
-        assertEquals(1, task0.metrics().messagesInFlight().getValue());
+      assertEquals(1, task0.metrics().messagesInFlight().getValue());
 
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
-        callback.complete();
-        firstMessageBarrier.countDown();
-        return null;
-      }).when(task0).process(eq(envelope01), any(), any());
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      callback.complete();
+      firstMessageBarrier.countDown();
+      return null;
+    }).when(task0).process(eq(envelope01), any(), any());
 
     doAnswer(invocation -> {
-        assertEquals(0, task0.metrics().messagesInFlight().getValue());
-        assertEquals(2, task0.metrics().asyncCallbackCompleted().getCount());
+      assertEquals(0, task0.metrics().messagesInFlight().getValue());
+      assertEquals(2, task0.metrics().asyncCallbackCompleted().getCount());
 
-        return null;
-      }).when(task0).endOfStream(any());
+      return null;
+    }).when(task0).endOfStream(any());
 
     Map<TaskName, RunLoopTask> tasks = new HashMap<>();
     tasks.put(taskName0, task0);
@@ -395,10 +391,10 @@ public class TestRunLoop {
                                             callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
     when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope01).thenReturn(ssp0EndOfStream)
         .thenAnswer(invocation -> {
-            // this ensures that the end of stream message has passed through run loop BEFORE the last remaining in flight message completes
-            firstMessageBarrier.countDown();
-            return null;
-          });
+          // this ensures that the end of stream message has passed through run loop BEFORE the last remaining in flight message completes
+          firstMessageBarrier.countDown();
+          return null;
+        });
 
     runLoop.run();
 
@@ -411,11 +407,11 @@ public class TestRunLoop {
 
     RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(0, ReadableCoordinator.class);
+      ReadableCoordinator coordinator = invocation.getArgumentAt(0, ReadableCoordinator.class);
 
-        coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
-        return null;
-      }).when(task0).endOfStream(any());
+      coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+      return null;
+    }).when(task0).endOfStream(any());
 
     Map<TaskName, RunLoopTask> tasks = new HashMap<>();
 
@@ -445,44 +441,44 @@ public class TestRunLoop {
     when(task0.offsetManager()).thenReturn(offsetManager);
     CountDownLatch firstMessageBarrier = new CountDownLatch(1);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
-
-        taskExecutor.submit(() -> {
-            firstMessageBarrier.await();
-            coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
-            callback.complete();
-            return null;
-          });
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+
+      taskExecutor.submit(() -> {
+        firstMessageBarrier.await();
+        coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+        callback.complete();
         return null;
-      }).when(task0).process(eq(envelope00), any(), any());
+      });
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
     CountDownLatch secondMessageBarrier = new CountDownLatch(1);
     doAnswer(invocation -> {
-        ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        TaskCallback callback = callbackFactory.createCallback();
-
-        taskExecutor.submit(() -> {
-            // let the first message proceed to ask for a commit
-            firstMessageBarrier.countDown();
-            // block this message until commit is executed
-            secondMessageBarrier.await();
-            coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
-            callback.complete();
-            return null;
-          });
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+
+      taskExecutor.submit(() -> {
+        // let the first message proceed to ask for a commit
+        firstMessageBarrier.countDown();
+        // block this message until commit is executed
+        secondMessageBarrier.await();
+        coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+        callback.complete();
         return null;
-      }).when(task0).process(eq(envelope01), any(), any());
+      });
+      return null;
+    }).when(task0).process(eq(envelope01), any(), any());
 
     doAnswer(invocation -> {
-        assertEquals(1, task0.metrics().asyncCallbackCompleted().getCount());
-        assertEquals(1, task0.metrics().messagesInFlight().getValue());
+      assertEquals(1, task0.metrics().asyncCallbackCompleted().getCount());
+      assertEquals(1, task0.metrics().messagesInFlight().getValue());
 
-        secondMessageBarrier.countDown();
-        return null;
-      }).when(task0).commit();
+      secondMessageBarrier.countDown();
+      return null;
+    }).when(task0).commit();
 
     Map<TaskName, RunLoopTask> tasks = new HashMap<>();
     tasks.put(taskName0, task0);
@@ -504,17 +500,16 @@ public class TestRunLoop {
 
     RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
     doAnswer(invocation -> {
-        TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
-        callbackFactory.createCallback().failure(new Exception("Intentional failure"));
-        return null;
-      }).when(task0).process(eq(envelope00), any(), any());
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      callbackFactory.createCallback().failure(new Exception("Intentional failure"));
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
     Map<TaskName, RunLoopTask> tasks = ImmutableMap.of(taskName0, task0);
 
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-        () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
 
     when(consumerMultiplexer.choose(false))
         .thenReturn(envelope00)
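The RunLoop constructor calls above also move from aligning wrapped arguments under the
opening parenthesis to a fixed four-space continuation indent, collapsing three lines
into two. Before and after, as the hunks show:

    // before
    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
                                            () -> 0L, false);

    // after
    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);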
diff --git a/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java b/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java
index a7f022e..6429a54 100644
--- a/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java
+++ b/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java
@@ -72,10 +72,10 @@ public class TestDiagnosticsManager {
     mockExecutorService = Mockito.mock(ScheduledExecutorService.class);
     Mockito.when(mockExecutorService.scheduleWithFixedDelay(Mockito.any(), Mockito.anyLong(), Mockito.anyLong(),
         Mockito.eq(TimeUnit.SECONDS))).thenAnswer(invocation -> {
-            ((Runnable) invocation.getArguments()[0]).run();
-            return Mockito.
-                mock(ScheduledFuture.class);
-          });
+          ((Runnable) invocation.getArguments()[0]).run();
+          return Mockito
+              .mock(ScheduledFuture.class);
+        });
 
     this.diagnosticsManager =
         new DiagnosticsManager(jobName, jobId, containerModels, containerMb, containerNumCores, numPersistentStores, maxHeapSize, containerThreadPoolSize,
@@ -83,7 +83,7 @@ public class TestDiagnosticsManager {
             mockSystemProducer, Duration.ofSeconds(1), mockExecutorService, autosizingEnabled);
 
     exceptionEventList.forEach(
-        diagnosticsExceptionEvent -> this.diagnosticsManager.addExceptionEvent(diagnosticsExceptionEvent));
+      diagnosticsExceptionEvent -> this.diagnosticsManager.addExceptionEvent(diagnosticsExceptionEvent));
 
     this.diagnosticsManager.addProcessorStopEvent("0", executionEnvContainerId, hostname, 101);
   }
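One detail in the hunk above goes beyond re-indentation: the chained call is re-broken so
that the dot leads the continuation line rather than trailing the previous one, presumably
to satisfy the dot-wrap rule in the new checkstyle. From the new lines of the patch:

    ((Runnable) invocation.getArguments()[0]).run();
    return Mockito
        .mock(ScheduledFuture.class);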
diff --git a/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java b/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java
index 63d290a..c25c265 100644
--- a/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java
+++ b/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java
@@ -144,13 +144,13 @@ public class TestExecutionPlanner {
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc-> {
-        MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input1Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        input1
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-            .map(kv -> kv)
-            .sendTo(output1);
-      }, config);
+      MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input1Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      input1
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+          .map(kv -> kv)
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamStreamJoin() {
@@ -166,30 +166,30 @@ public class TestExecutionPlanner {
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 =
-            appDesc.getInputStream(input1Descriptor)
-                .map(m -> m);
-        MessageStream<KV<Object, Object>> messageStream2 =
-            appDesc.getInputStream(input2Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-                .filter(m -> true);
-        MessageStream<KV<Object, Object>> messageStream3 =
-            appDesc.getInputStream(input3Descriptor)
-                .filter(m -> true)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-                .map(m -> m);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
-
-        messageStream1
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .sendTo(output1);
-        messageStream3
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output2);
-      }, config);
+      MessageStream<KV<Object, Object>> messageStream1 =
+          appDesc.getInputStream(input1Descriptor)
+              .map(m -> m);
+      MessageStream<KV<Object, Object>> messageStream2 =
+          appDesc.getInputStream(input2Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+              .filter(m -> true);
+      MessageStream<KV<Object, Object>> messageStream3 =
+          appDesc.getInputStream(input3Descriptor)
+              .filter(m -> true)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+              .map(m -> m);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
+
+      messageStream1
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .sendTo(output1);
+      messageStream3
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output2);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithInvalidStreamStreamJoin() {
@@ -204,45 +204,45 @@ public class TestExecutionPlanner {
      *   input3 (32) --
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        messageStream1
-            .join(messageStream3,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .join(messageStream3,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithJoinAndWindow() {
 
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor).map(m -> m);
-        MessageStream<KV<Object, Object>> messageStream2 =
-          appDesc.getInputStream(input2Descriptor)
-              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-              .filter(m -> true);
-        MessageStream<KV<Object, Object>> messageStream3 =
-          appDesc.getInputStream(input3Descriptor)
-              .filter(m -> true)
-              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-              .map(m -> m);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
-
-        messageStream1.map(m -> m)
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor).map(m -> m);
+      MessageStream<KV<Object, Object>> messageStream2 =
+        appDesc.getInputStream(input2Descriptor)
+            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+            .filter(m -> true);
+      MessageStream<KV<Object, Object>> messageStream3 =
+        appDesc.getInputStream(input3Descriptor)
             .filter(m -> true)
-            .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(8), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w1");
+            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+            .map(m -> m);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
 
-        messageStream2.map(m -> m)
-            .filter(m -> true)
-            .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(16), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w2");
+      messageStream1.map(m -> m)
+          .filter(m -> true)
+          .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(8), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w1");
 
-        messageStream1.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(1600), "j1").sendTo(output1);
-        messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(100), "j2").sendTo(output2);
-        messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(252), "j3").sendTo(output2);
-      }, config);
+      messageStream2.map(m -> m)
+          .filter(m -> true)
+          .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(16), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w2");
+
+      messageStream1.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(1600), "j1").sendTo(output1);
+      messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(100), "j2").sendTo(output2);
+      messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(252), "j3").sendTo(output2);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamTableJoin() {
@@ -261,26 +261,26 @@ public class TestExecutionPlanner {
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
-        MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
+      MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-            "table-id", new KVSerde(new StringSerde(), new StringSerde()));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+          "table-id", new KVSerde(new StringSerde(), new StringSerde()));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream2
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-            .sendTo(table);
+      messageStream2
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+          .sendTo(table);
 
-        messageStream1
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-            .join(table, mock(StreamTableJoinFunction.class))
-            .join(messageStream3,
-                  mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+          .join(table, mock(StreamTableJoinFunction.class))
+          .join(messageStream3,
+                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithComplexStreamStreamJoin() {
@@ -305,37 +305,37 @@ public class TestExecutionPlanner {
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
 
-        MessageStream<KV<Object, Object>> messageStream2 =
-            appDesc.getInputStream(input2Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2");
+      MessageStream<KV<Object, Object>> messageStream2 =
+          appDesc.getInputStream(input2Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2");
 
-        MessageStream<KV<Object, Object>> messageStream3 =
-            appDesc.getInputStream(input3Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p3");
+      MessageStream<KV<Object, Object>> messageStream3 =
+          appDesc.getInputStream(input3Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p3");
 
-        MessageStream<KV<Object, Object>> messageStream4 =
-            appDesc.getInputStream(input4Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p4");
+      MessageStream<KV<Object, Object>> messageStream4 =
+          appDesc.getInputStream(input4Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p4");
 
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        messageStream1
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1")
-            .sendTo(output1);
+      messageStream1
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1")
+          .sendTo(output1);
 
-        messageStream3
-            .join(messageStream4,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output1);
+      messageStream3
+          .join(messageStream4,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output1);
 
-        messageStream2
-            .join(messageStream3,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j3")
-            .sendTo(output1);
-      }, config);
+      messageStream2
+          .join(messageStream3,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j3")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithInvalidStreamTableJoin() {
@@ -351,22 +351,22 @@ public class TestExecutionPlanner {
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream1.sendTo(table);
+      messageStream1.sendTo(table);
 
-        messageStream1
-            .join(table, mock(StreamTableJoinFunction.class))
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .join(table, mock(StreamTableJoinFunction.class))
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamTableJoinWithSideInputs() {
@@ -379,20 +379,20 @@ public class TestExecutionPlanner {
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()))
-            .withSideInputs(Arrays.asList("input1"))
-            .withSideInputsProcessor(mock(SideInputsProcessor.class));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()))
+          .withSideInputs(Arrays.asList("input1"))
+          .withSideInputsProcessor(mock(SideInputsProcessor.class));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream2
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-            .join(table, mock(StreamTableJoinFunction.class))
-            .sendTo(output1);
-      }, config);
+      messageStream2
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+          .join(table, mock(StreamTableJoinFunction.class))
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithInvalidStreamTableJoinWithSideInputs() {
@@ -407,19 +407,19 @@ public class TestExecutionPlanner {
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()))
-            .withSideInputs(Arrays.asList("input2"))
-            .withSideInputsProcessor(mock(SideInputsProcessor.class));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()))
+          .withSideInputs(Arrays.asList("input2"))
+          .withSideInputsProcessor(mock(SideInputsProcessor.class));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream1
-            .join(table, mock(StreamTableJoinFunction.class))
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .join(table, mock(StreamTableJoinFunction.class))
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamTableJoinAndSendToSameTable() {
@@ -433,17 +433,17 @@ public class TestExecutionPlanner {
      * streams participating in stream-table joins. Please, refer to SAMZA SEP-16 for more details.
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream1
-          .join(table, mock(StreamTableJoinFunction.class))
-          .sendTo(table);
+      messageStream1
+        .join(table, mock(StreamTableJoinFunction.class))
+        .sendTo(table);
 
-      }, config);
+    }, config);
   }
 
   @Before
@@ -535,8 +535,8 @@ public class TestExecutionPlanner {
     assertTrue(jobGraph.getOrCreateStreamEdge(output2Spec).getPartitionCount() == 16);
 
     jobGraph.getIntermediateStreamEdges().forEach(edge -> {
-        assertTrue(edge.getPartitionCount() == -1);
-      });
+      assertTrue(edge.getPartitionCount() == -1);
+    });
   }
 
   @Test
@@ -547,8 +547,8 @@ public class TestExecutionPlanner {
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount());
-      });
+      assertEquals(64, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -569,8 +569,8 @@ public class TestExecutionPlanner {
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount());
-      });
+      assertEquals(64, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -582,8 +582,8 @@ public class TestExecutionPlanner {
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount()); // max of input1 and output1
-      });
+      assertEquals(64, edge.getPartitionCount()); // max of input1 and output1
+    });
   }
 
   @Test
@@ -595,8 +595,8 @@ public class TestExecutionPlanner {
 
     // Partitions should be the same as input3
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(32, edge.getPartitionCount());
-      });
+      assertEquals(32, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -608,8 +608,8 @@ public class TestExecutionPlanner {
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount());
-      });
+      assertEquals(64, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -633,8 +633,8 @@ public class TestExecutionPlanner {
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertTrue(edge.getPartitionCount() == DEFAULT_PARTITIONS);
-      });
+      assertTrue(edge.getPartitionCount() == DEFAULT_PARTITIONS);
+    });
   }
 
   @Test
@@ -659,17 +659,17 @@ public class TestExecutionPlanner {
 
     ExecutionPlanner planner = new ExecutionPlanner(config, streamManager);
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input4Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        input1.partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1").map(kv -> kv).sendTo(output1);
-      }, config);
+      MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input4Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      input1.partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1").map(kv -> kv).sendTo(output1);
+    }, config);
 
     JobGraph jobGraph = (JobGraph) planner.plan(graphSpec);
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(partitionLimit, edge.getPartitionCount()); // max of input1 and output1
-      });
+      assertEquals(partitionLimit, edge.getPartitionCount()); // max of input1 and output1
+    });
   }
 
   @Test(expected = SamzaException.class)
@@ -836,10 +836,10 @@ public class TestExecutionPlanner {
         .filter(streamId -> inputDescriptors.containsKey(streamId)).collect(Collectors.toList()).isEmpty());
     Set<String> intermediateStreams = new HashSet<>(inputDescriptors.keySet());
     jobGraph.getInputStreams().forEach(edge -> {
-        if (intermediateStreams.contains(edge.getStreamSpec().getId())) {
-          intermediateStreams.remove(edge.getStreamSpec().getId());
-        }
-      });
+      if (intermediateStreams.contains(edge.getStreamSpec().getId())) {
+        intermediateStreams.remove(edge.getStreamSpec().getId());
+      }
+    });
     assertEquals(new HashSet<>(Arrays.asList(intermediateStream1, intermediateBroadcast)), intermediateStreams);
   }
 
diff --git a/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java b/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java
index a4d59d3..665b70a 100644
--- a/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java
+++ b/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java
@@ -179,43 +179,43 @@ public class TestJobGraphJsonGenerator {
     StreamManager streamManager = new StreamManager(systemAdmins);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        KVSerde<Object, Object> kvSerde = new KVSerde<>(new NoOpSerde(), new NoOpSerde());
-        String mockSystemFactoryClass = "factory.class.name";
-        GenericSystemDescriptor system1 = new GenericSystemDescriptor("system1", mockSystemFactoryClass);
-        GenericSystemDescriptor system2 = new GenericSystemDescriptor("system2", mockSystemFactoryClass);
-        GenericInputDescriptor<KV<Object, Object>> input1Descriptor = system1.getInputDescriptor("input1", kvSerde);
-        GenericInputDescriptor<KV<Object, Object>> input2Descriptor = system2.getInputDescriptor("input2", kvSerde);
-        GenericInputDescriptor<KV<Object, Object>> input3Descriptor = system2.getInputDescriptor("input3", kvSerde);
-        GenericOutputDescriptor<KV<Object, Object>>  output1Descriptor = system1.getOutputDescriptor("output1", kvSerde);
-        GenericOutputDescriptor<KV<Object, Object>> output2Descriptor = system2.getOutputDescriptor("output2", kvSerde);
-
-        MessageStream<KV<Object, Object>> messageStream1 =
-            appDesc.getInputStream(input1Descriptor)
-                .map(m -> m);
-        MessageStream<KV<Object, Object>> messageStream2 =
-            appDesc.getInputStream(input2Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-                .filter(m -> true);
-        MessageStream<KV<Object, Object>> messageStream3 =
-            appDesc.getInputStream(input3Descriptor)
-                .filter(m -> true)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-                .map(m -> m);
-        OutputStream<KV<Object, Object>> outputStream1 = appDesc.getOutputStream(output1Descriptor);
-        OutputStream<KV<Object, Object>> outputStream2 = appDesc.getOutputStream(output2Descriptor);
-
-        messageStream1
-            .join(messageStream2,
-                (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .sendTo(outputStream1);
-        messageStream2.sink((message, collector, coordinator) -> { });
-        messageStream3
-            .join(messageStream2,
-                (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(outputStream2);
-      }, config);
+      KVSerde<Object, Object> kvSerde = new KVSerde<>(new NoOpSerde(), new NoOpSerde());
+      String mockSystemFactoryClass = "factory.class.name";
+      GenericSystemDescriptor system1 = new GenericSystemDescriptor("system1", mockSystemFactoryClass);
+      GenericSystemDescriptor system2 = new GenericSystemDescriptor("system2", mockSystemFactoryClass);
+      GenericInputDescriptor<KV<Object, Object>> input1Descriptor = system1.getInputDescriptor("input1", kvSerde);
+      GenericInputDescriptor<KV<Object, Object>> input2Descriptor = system2.getInputDescriptor("input2", kvSerde);
+      GenericInputDescriptor<KV<Object, Object>> input3Descriptor = system2.getInputDescriptor("input3", kvSerde);
+      GenericOutputDescriptor<KV<Object, Object>>  output1Descriptor = system1.getOutputDescriptor("output1", kvSerde);
+      GenericOutputDescriptor<KV<Object, Object>> output2Descriptor = system2.getOutputDescriptor("output2", kvSerde);
+
+      MessageStream<KV<Object, Object>> messageStream1 =
+          appDesc.getInputStream(input1Descriptor)
+              .map(m -> m);
+      MessageStream<KV<Object, Object>> messageStream2 =
+          appDesc.getInputStream(input2Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+              .filter(m -> true);
+      MessageStream<KV<Object, Object>> messageStream3 =
+          appDesc.getInputStream(input3Descriptor)
+              .filter(m -> true)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+              .map(m -> m);
+      OutputStream<KV<Object, Object>> outputStream1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> outputStream2 = appDesc.getOutputStream(output2Descriptor);
+
+      messageStream1
+          .join(messageStream2,
+              (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .sendTo(outputStream1);
+      messageStream2.sink((message, collector, coordinator) -> { });
+      messageStream3
+          .join(messageStream2,
+              (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(outputStream2);
+    }, config);
 
     ExecutionPlanner planner = new ExecutionPlanner(config, streamManager);
     ExecutionPlan plan = planner.plan(graphSpec);
@@ -255,27 +255,24 @@ public class TestJobGraphJsonGenerator {
     StreamManager streamManager = new StreamManager(systemAdmins);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        KVSerde<String, PageViewEvent> pvSerde = KVSerde.of(new StringSerde(), new JsonSerdeV2<>(PageViewEvent.class));
-        GenericSystemDescriptor isd = new GenericSystemDescriptor("hdfs", "mockSystemFactoryClass");
-        GenericInputDescriptor<KV<String, PageViewEvent>> pageView = isd.getInputDescriptor("PageView", pvSerde);
-
-        KVSerde<String, Long> pvcSerde = KVSerde.of(new StringSerde(), new LongSerde());
-        GenericSystemDescriptor osd = new GenericSystemDescriptor("kafka", "mockSystemFactoryClass");
-        GenericOutputDescriptor<KV<String, Long>> pageViewCount = osd.getOutputDescriptor("PageViewCount", pvcSerde);
-
-        MessageStream<KV<String, PageViewEvent>> inputStream = appDesc.getInputStream(pageView);
-        OutputStream<KV<String, Long>> outputStream = appDesc.getOutputStream(pageViewCount);
-        inputStream
-            .partitionBy(kv -> kv.getValue().getCountry(), kv -> kv.getValue(), pvSerde, "keyed-by-country")
-            .window(Windows.keyedTumblingWindow(kv -> kv.getValue().getCountry(),
-                Duration.ofSeconds(10L),
-                () -> 0L,
-                (m, c) -> c + 1L,
-                new StringSerde(),
-                new LongSerde()), "count-by-country")
-            .map(pane -> new KV<>(pane.getKey().getKey(), pane.getMessage()))
-            .sendTo(outputStream);
-      }, config);
+      KVSerde<String, PageViewEvent> pvSerde = KVSerde.of(new StringSerde(), new JsonSerdeV2<>(PageViewEvent.class));
+      GenericSystemDescriptor isd = new GenericSystemDescriptor("hdfs", "mockSystemFactoryClass");
+      GenericInputDescriptor<KV<String, PageViewEvent>> pageView = isd.getInputDescriptor("PageView", pvSerde);
+
+      KVSerde<String, Long> pvcSerde = KVSerde.of(new StringSerde(), new LongSerde());
+      GenericSystemDescriptor osd = new GenericSystemDescriptor("kafka", "mockSystemFactoryClass");
+      GenericOutputDescriptor<KV<String, Long>> pageViewCount = osd.getOutputDescriptor("PageViewCount", pvcSerde);
+
+      MessageStream<KV<String, PageViewEvent>> inputStream = appDesc.getInputStream(pageView);
+      OutputStream<KV<String, Long>> outputStream = appDesc.getOutputStream(pageViewCount);
+      inputStream
+          .partitionBy(kv -> kv.getValue().getCountry(), kv -> kv.getValue(), pvSerde, "keyed-by-country")
+          .window(Windows.keyedTumblingWindow(kv -> kv.getValue().getCountry(),
+              Duration.ofSeconds(10L), () -> 0L, (m, c) -> c + 1L, new StringSerde(), new LongSerde()),
+              "count-by-country")
+          .map(pane -> new KV<>(pane.getKey().getKey(), pane.getMessage()))
+          .sendTo(outputStream);
+    }, config);
 
     ExecutionPlanner planner = new ExecutionPlanner(config, streamManager);
     ExecutionPlan plan = planner.plan(graphSpec);
diff --git a/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java b/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java
index 2d80e79..93b712f 100644
--- a/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java
+++ b/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java
@@ -289,8 +289,8 @@ public class TestJobNodeConfigurationGenerator extends ExecutionPlannerTestBase
     SerializableSerde<Serde> serializableSerde = new SerializableSerde<>();
     assertEquals(numSerdes, serializers.size());
     return serializers.entrySet().stream().collect(Collectors.toMap(
-        e -> e.getKey().replace(SerializerConfig.SERIALIZED_INSTANCE_SUFFIX, ""),
-        e -> serializableSerde.fromBytes(Base64.getDecoder().decode(e.getValue().getBytes()))
+      e -> e.getKey().replace(SerializerConfig.SERIALIZED_INSTANCE_SUFFIX, ""),
+      e -> serializableSerde.fromBytes(Base64.getDecoder().decode(e.getValue().getBytes()))
     ));
   }
 
diff --git a/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java b/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java
index b310e6f..907dd7d 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java
@@ -98,15 +98,15 @@ public class TestJoinOperator {
     Config config = new MapConfig(mapConfig);
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        IntegerSerde integerSerde = new IntegerSerde();
-        KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
-        GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
-        GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor = sd.getInputDescriptor("inStream", kvSerde);
+      IntegerSerde integerSerde = new IntegerSerde();
+      KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
+      GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor = sd.getInputDescriptor("inStream", kvSerde);
 
-        MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor);
+      MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor);
 
-        inStream.join(inStream, new TestJoinFunction(), integerSerde, kvSerde, kvSerde, JOIN_TTL, "join");
-      }, config);
+      inStream.join(inStream, new TestJoinFunction(), integerSerde, kvSerde, kvSerde, JOIN_TTL, "join");
+    }, config);
 
     createStreamOperatorTask(new SystemClock(), streamAppDesc); // should throw an exception
   }
@@ -336,22 +336,22 @@ public class TestJoinOperator {
     Config config = new MapConfig(mapConfig);
 
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        IntegerSerde integerSerde = new IntegerSerde();
-        KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
-        GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
-        GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor1 = sd.getInputDescriptor("inStream", kvSerde);
-        GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor2 = sd.getInputDescriptor("inStream2", kvSerde);
-
-        MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor1);
-        MessageStream<KV<Integer, Integer>> inStream2 = appDesc.getInputStream(inputDescriptor2);
-
-        inStream
-            .join(inStream2, joinFn, integerSerde, kvSerde, kvSerde, JOIN_TTL, "j1")
-            .sink((message, messageCollector, taskCoordinator) -> {
-                SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-                messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-              });
-      }, config);
+      IntegerSerde integerSerde = new IntegerSerde();
+      KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
+      GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor1 = sd.getInputDescriptor("inStream", kvSerde);
+      GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor2 = sd.getInputDescriptor("inStream2", kvSerde);
+
+      MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor1);
+      MessageStream<KV<Integer, Integer>> inStream2 = appDesc.getInputStream(inputDescriptor2);
+
+      inStream
+          .join(inStream2, joinFn, integerSerde, kvSerde, kvSerde, JOIN_TTL, "j1")
+          .sink((message, messageCollector, taskCoordinator) -> {
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
+    }, config);
   }
 
   private static class TestJoinFunction
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java b/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java
index 9ff9a4f..5e23653 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java
@@ -60,11 +60,11 @@ public class TestControlMessageSender {
     Set<Integer> partitions = new HashSet<>();
     MessageCollector collector = mock(MessageCollector.class);
     doAnswer(invocation -> {
-        OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
-        partitions.add((Integer) envelope.getPartitionKey());
-        assertEquals(envelope.getSystemStream(), systemStream);
-        return null;
-      }).when(collector).send(any());
+      OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
+      partitions.add((Integer) envelope.getPartitionKey());
+      assertEquals(envelope.getSystemStream(), systemStream);
+      return null;
+    }).when(collector).send(any());
 
     ControlMessageSender sender = new ControlMessageSender(metadataCache);
     WatermarkMessage watermark = new WatermarkMessage(System.currentTimeMillis(), "task 0");
@@ -88,11 +88,11 @@ public class TestControlMessageSender {
     Set<Integer> partitions = new HashSet<>();
     MessageCollector collector = mock(MessageCollector.class);
     doAnswer(invocation -> {
-        OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
-        partitions.add((Integer) envelope.getPartitionKey());
-        assertEquals(envelope.getSystemStream(), systemStream);
-        return null;
-      }).when(collector).send(any());
+      OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
+      partitions.add((Integer) envelope.getPartitionKey());
+      assertEquals(envelope.getSystemStream(), systemStream);
+      return null;
+    }).when(collector).send(any());
 
     ControlMessageSender sender = new ControlMessageSender(metadataCache);
     WatermarkMessage watermark = new WatermarkMessage(System.currentTimeMillis(), "task 0");
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java b/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java
index 579c028..ea57479 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java
@@ -132,17 +132,17 @@ public class TestOperatorImplGraph {
     when(this.context.getJobContext().getConfig()).thenReturn(config);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        GenericOutputDescriptor outputDescriptor = sd.getOutputDescriptor(outputStreamId, mock(Serde.class));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        OutputStream<Object> outputStream = appDesc.getOutputStream(outputDescriptor);
-
-        inputStream
-            .filter(mock(FilterFunction.class))
-            .map(mock(MapFunction.class))
-            .sendTo(outputStream);
-      }, config);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      GenericOutputDescriptor outputDescriptor = sd.getOutputDescriptor(outputStreamId, mock(Serde.class));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      OutputStream<Object> outputStream = appDesc.getOutputStream(outputDescriptor);
+
+      inputStream
+          .filter(mock(FilterFunction.class))
+          .map(mock(MapFunction.class))
+          .sendTo(outputStream);
+    }, config);
 
     OperatorImplGraph opImplGraph =
         new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, mock(Clock.class));
@@ -184,19 +184,19 @@ public class TestOperatorImplGraph {
     when(this.context.getJobContext().getConfig()).thenReturn(config);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = isd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        GenericOutputDescriptor outputDescriptor = osd.getOutputDescriptor(outputStreamId,
-            KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        OutputStream<KV<Integer, String>> outputStream = appDesc.getOutputStream(outputDescriptor);
-
-        inputStream
-            .partitionBy(Object::hashCode, Object::toString,
-                KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)), "p1")
-            .sendTo(outputStream);
-      }, config);
+      GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = isd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      GenericOutputDescriptor outputDescriptor = osd.getOutputDescriptor(outputStreamId,
+          KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      OutputStream<KV<Integer, String>> outputStream = appDesc.getOutputStream(outputDescriptor);
+
+      inputStream
+          .partitionBy(Object::hashCode, Object::toString,
+              KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)), "p1")
+          .sendTo(outputStream);
+    }, config);
 
     JobModel jobModel = mock(JobModel.class);
     ContainerModel containerModel = mock(ContainerModel.class);
@@ -236,12 +236,12 @@ public class TestOperatorImplGraph {
     Config config = new MapConfig(configMap);
     when(this.context.getJobContext().getConfig()).thenReturn(config);
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        inputStream.filter(mock(FilterFunction.class));
-        inputStream.map(mock(MapFunction.class));
-      }, config);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      inputStream.filter(mock(FilterFunction.class));
+      inputStream.map(mock(MapFunction.class));
+    }, config);
 
     OperatorImplGraph opImplGraph =
         new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, mock(Clock.class));
@@ -259,14 +259,14 @@ public class TestOperatorImplGraph {
     String inputStreamId = "input";
     String inputSystem = "input-system";
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        MessageStream<Object> stream1 = inputStream.filter(mock(FilterFunction.class));
-        MessageStream<Object> stream2 = inputStream.map(mock(MapFunction.class));
-        stream1.merge(Collections.singleton(stream2))
-            .map(new TestMapFunction<Object, Object>("test-map-1", (Function & Serializable) m -> m));
-      }, getConfig());
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      MessageStream<Object> stream1 = inputStream.filter(mock(FilterFunction.class));
+      MessageStream<Object> stream2 = inputStream.map(mock(MapFunction.class));
+      stream1.merge(Collections.singleton(stream2))
+          .map(new TestMapFunction<Object, Object>("test-map-1", (Function & Serializable) m -> m));
+    }, getConfig());
 
     TaskName mockTaskName = mock(TaskName.class);
     TaskModel taskModel = mock(TaskModel.class);
@@ -277,7 +277,7 @@ public class TestOperatorImplGraph {
         new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, mock(Clock.class));
 
     Set<OperatorImpl> opSet = opImplGraph.getAllInputOperators().stream().collect(HashSet::new,
-        (s, op) -> addOperatorRecursively(s, op), HashSet::addAll);
+      (s, op) -> addOperatorRecursively(s, op), HashSet::addAll);
     Object[] mergeOps = opSet.stream().filter(op -> op.getOperatorSpec().getOpCode() == OpCode.MERGE).toArray();
     assertEquals(1, mergeOps.length);
     assertEquals(1, ((OperatorImpl) mergeOps[0]).registeredOperators.size());
@@ -309,15 +309,15 @@ public class TestOperatorImplGraph {
         (BiFunction & Serializable) (m1, m2) -> KV.of(m1, m2), keyFn, keyFn);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
-        MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
-        MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
+      MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
+      MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
 
-        inputStream1.join(inputStream2, testJoinFunction,
-            mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1");
-      }, config);
+      inputStream1.join(inputStream2, testJoinFunction,
+          mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1");
+    }, config);
 
     TaskName mockTaskName = mock(TaskName.class);
     TaskModel taskModel = mock(TaskModel.class);
@@ -377,19 +377,19 @@ public class TestOperatorImplGraph {
     when(this.context.getTaskContext().getTaskModel()).thenReturn(taskModel);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
-        MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
-        MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
+      MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
+      MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
 
-        Function mapFn = (Function & Serializable) m -> m;
-        inputStream1.map(new TestMapFunction<Object, Object>("1", mapFn))
-            .map(new TestMapFunction<Object, Object>("2", mapFn));
+      Function mapFn = (Function & Serializable) m -> m;
+      inputStream1.map(new TestMapFunction<Object, Object>("1", mapFn))
+          .map(new TestMapFunction<Object, Object>("2", mapFn));
 
-        inputStream2.map(new TestMapFunction<Object, Object>("3", mapFn))
-            .map(new TestMapFunction<Object, Object>("4", mapFn));
-      }, getConfig());
+      inputStream2.map(new TestMapFunction<Object, Object>("3", mapFn))
+          .map(new TestMapFunction<Object, Object>("4", mapFn));
+    }, getConfig());
 
     OperatorImplGraph opImplGraph = new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, SystemClock.instance());
 
@@ -475,33 +475,33 @@ public class TestOperatorImplGraph {
     when(this.context.getJobContext().getConfig()).thenReturn(config);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor1 = isd.getInputDescriptor(inputStreamId1, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor2 = isd.getInputDescriptor(inputStreamId2, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor3 = isd.getInputDescriptor(inputStreamId3, mock(Serde.class));
-        GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
-        GenericOutputDescriptor outputDescriptor1 = osd.getOutputDescriptor(outputStreamId1, mock(Serde.class));
-        GenericOutputDescriptor outputDescriptor2 = osd.getOutputDescriptor(outputStreamId2, mock(Serde.class));
-        MessageStream messageStream1 = appDesc.getInputStream(inputDescriptor1).map(m -> m);
-        MessageStream messageStream2 = appDesc.getInputStream(inputDescriptor2).filter(m -> true);
-        MessageStream messageStream3 =
-            appDesc.getInputStream(inputDescriptor3)
-                .filter(m -> true)
-                .partitionBy(m -> "m", m -> m, mock(KVSerde.class),  "p1")
-                .map(m -> m);
-        OutputStream<Object> outputStream1 = appDesc.getOutputStream(outputDescriptor1);
-        OutputStream<Object> outputStream2 = appDesc.getOutputStream(outputDescriptor2);
-
-        messageStream1
-            .join(messageStream2, mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .partitionBy(m -> "m", m -> m, mock(KVSerde.class), "p2")
-            .sendTo(outputStream1);
-        messageStream3
-            .join(messageStream2, mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(outputStream2);
-      }, config);
+      GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor1 = isd.getInputDescriptor(inputStreamId1, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor2 = isd.getInputDescriptor(inputStreamId2, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor3 = isd.getInputDescriptor(inputStreamId3, mock(Serde.class));
+      GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
+      GenericOutputDescriptor outputDescriptor1 = osd.getOutputDescriptor(outputStreamId1, mock(Serde.class));
+      GenericOutputDescriptor outputDescriptor2 = osd.getOutputDescriptor(outputStreamId2, mock(Serde.class));
+      MessageStream messageStream1 = appDesc.getInputStream(inputDescriptor1).map(m -> m);
+      MessageStream messageStream2 = appDesc.getInputStream(inputDescriptor2).filter(m -> true);
+      MessageStream messageStream3 =
+          appDesc.getInputStream(inputDescriptor3)
+              .filter(m -> true)
+              .partitionBy(m -> "m", m -> m, mock(KVSerde.class),  "p1")
+              .map(m -> m);
+      OutputStream<Object> outputStream1 = appDesc.getOutputStream(outputDescriptor1);
+      OutputStream<Object> outputStream2 = appDesc.getOutputStream(outputDescriptor2);
+
+      messageStream1
+          .join(messageStream2, mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .partitionBy(m -> "m", m -> m, mock(KVSerde.class), "p2")
+          .sendTo(outputStream1);
+      messageStream3
+          .join(messageStream2, mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(outputStream2);
+    }, config);
 
     Multimap<SystemStream, SystemStream> outputToInput =
         OperatorImplGraph.getIntermediateToInputStreamsMap(graphSpec.getOperatorSpecGraph(), new StreamConfig(config));
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java b/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java
index 594cd4a..76b79a7 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java
@@ -114,7 +114,7 @@ public class TestWindowOperator {
     StreamOperatorTask task = new StreamOperatorTask(sgb, testClock);
     task.init(this.context);
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     integers.forEach(n -> task.processAsync(new IntegerEnvelope(n), messageCollector, taskCoordinator, taskCallback));
     testClock.advanceTime(Duration.ofSeconds(1));
 
@@ -148,7 +148,7 @@ public class TestWindowOperator {
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     Assert.assertEquals(windowPanes.size(), 0);
 
     integers.forEach(n -> task.processAsync(new IntegerEnvelope(n), messageCollector, taskCoordinator, taskCallback));
@@ -197,7 +197,7 @@ public class TestWindowOperator {
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     integers.forEach(n -> task.processAsync(new IntegerEnvelope(n), messageCollector, taskCoordinator, taskCallback));
     testClock.advanceTime(Duration.ofSeconds(1));
     task.window(messageCollector, taskCoordinator);
@@ -225,7 +225,7 @@ public class TestWindowOperator {
     StreamOperatorTask task = new StreamOperatorTask(sgb, testClock);
     task.init(this.context);
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     testClock.advanceTime(Duration.ofSeconds(1));
@@ -271,7 +271,7 @@ public class TestWindowOperator {
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.init(this.context);
 
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
@@ -304,7 +304,7 @@ public class TestWindowOperator {
 
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     Assert.assertEquals(windowPanes.size(), 1);
@@ -348,7 +348,7 @@ public class TestWindowOperator {
 
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     //assert that the count trigger fired
@@ -403,7 +403,7 @@ public class TestWindowOperator {
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
 
     TestClock testClock = new TestClock();
     StreamOperatorTask task = new StreamOperatorTask(sgb, testClock);
@@ -446,7 +446,7 @@ public class TestWindowOperator {
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     Assert.assertEquals(windowPanes.size(), 0);
 
     List<Integer> integerList = ImmutableList.of(1, 2, 1, 2, 1);
@@ -479,7 +479,7 @@ public class TestWindowOperator {
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
 
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
@@ -510,7 +510,7 @@ public class TestWindowOperator {
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
 
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
@@ -537,9 +537,9 @@ public class TestWindowOperator {
           .window(Windows.keyedTumblingWindow(KV::getKey, duration, new IntegerSerde(), kvSerde)
               .setEarlyTrigger(earlyTrigger).setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
@@ -555,9 +555,9 @@ public class TestWindowOperator {
           .window(Windows.tumblingWindow(duration, kvSerde).setEarlyTrigger(earlyTrigger)
               .setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
@@ -572,9 +572,9 @@ public class TestWindowOperator {
           .window(Windows.keyedSessionWindow(KV::getKey, duration, new IntegerSerde(), kvSerde)
               .setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
@@ -594,9 +594,9 @@ public class TestWindowOperator {
               .setEarlyTrigger(earlyTrigger)
               .setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java b/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java
index 94e171a..cabd6d3 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java
@@ -98,15 +98,15 @@ public class TestTimeSeriesStoreImpl {
     List<TimestampedValue<byte[]>> values = readStore(timeSeriesStore, "hello", 0L, 2L);
     Assert.assertEquals(100, values.size());
     values.forEach(timeSeriesValue -> {
-        Assert.assertEquals("world-1", new String(timeSeriesValue.getValue()));
-      });
+      Assert.assertEquals("world-1", new String(timeSeriesValue.getValue()));
+    });
 
     // read from time-range [2,4) should return 100 entries
     values = readStore(timeSeriesStore, "hello", 2L, 4L);
     Assert.assertEquals(100, values.size());
     values.forEach(timeSeriesValue -> {
-        Assert.assertEquals("world-2", new String(timeSeriesValue.getValue()));
-      });
+      Assert.assertEquals("world-2", new String(timeSeriesValue.getValue()));
+    });
 
     // read all entries in the store
     values = readStore(timeSeriesStore, "hello", 0L, Integer.MAX_VALUE);
diff --git a/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java b/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java
index 71c3486..e0a4f1a 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java
@@ -164,7 +164,7 @@ public class TestOperatorSpec {
   @Test
   public void testStreamOperatorSpecWithMap() {
     MapFunction<TestMessageEnvelope, TestOutputMessageEnvelope> mapFn =
-        m -> new TestOutputMessageEnvelope(m.getKey(), m.getMessage().hashCode());
+      m -> new TestOutputMessageEnvelope(m.getKey(), m.getMessage().hashCode());
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> streamOperatorSpec =
         OperatorSpecs.createMapOperatorSpec(mapFn, "op0");
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> cloneOperatorSpec =
@@ -381,7 +381,7 @@ public class TestOperatorSpec {
     List<String> keys = new ArrayList<>(1);
     keys.add(0, "test-1");
     MapFunction<TestMessageEnvelope, TestOutputMessageEnvelope> mapFn =
-        m -> new TestOutputMessageEnvelope(keys.get(m.getKey().hashCode() % 1), integers.get(m.getMessage().hashCode() % 1));
+      m -> new TestOutputMessageEnvelope(keys.get(m.getKey().hashCode() % 1), integers.get(m.getMessage().hashCode() % 1));
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> streamOperatorSpec =
         OperatorSpecs.createMapOperatorSpec(mapFn, "op0");
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> cloneOperatorSpec =
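
The wrapped-lambda declarations being re-indented in this file are plain MapFunction assignments. A minimal sketch using stand-in String/Integer types in place of the test's envelope classes:

    import org.apache.samza.operators.functions.MapFunction;

    public class MapFunctionExample {
      // A lambda assigned to a MapFunction constant; the wrapped line after
      // "=" is what the checkstyle upgrade re-indents in the test above.
      static final MapFunction<String, Integer> HASH_FN =
          m -> m.hashCode();

      public static Integer applyOnce(String message) {
        return HASH_FN.apply(message);
      }
    }
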
diff --git a/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java b/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java
index 101e629..870f813 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java
@@ -62,9 +62,9 @@ public class TestPartitionByOperatorSpec {
     MapFunction<Object, Object> valueFn = m -> m;
     KVSerde<Object, Object> partitionBySerde = KVSerde.of(new NoOpSerde<>(), new NoOpSerde<>());
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, valueFn, partitionBySerde, testRepartitionedStreamName);
-      }, getConfig());
+      MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, valueFn, partitionBySerde, testRepartitionedStreamName);
+    }, getConfig());
     assertEquals(2, streamAppDesc.getInputOperators().size());
     Map<String, InputOperatorSpec> inputOpSpecs = streamAppDesc.getInputOperators();
     assertTrue(inputOpSpecs.keySet().contains(String.format("%s-%s-partition_by-%s", testJobName, testJobId, testRepartitionedStreamName)));
@@ -91,9 +91,9 @@ public class TestPartitionByOperatorSpec {
     MapFunction<Object, String> keyFn = m -> m.toString();
     MapFunction<Object, Object> valueFn = m -> m;
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, valueFn, mock(KVSerde.class), testRepartitionedStreamName);
-      }, getConfig());
+      MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, valueFn, mock(KVSerde.class), testRepartitionedStreamName);
+    }, getConfig());
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(
         String.format("%s-%s-partition_by-%s", testJobName, testJobId, testRepartitionedStreamName));
     assertNotNull(inputOpSpec);
@@ -116,9 +116,9 @@ public class TestPartitionByOperatorSpec {
   @Test
   public void testCopy() {
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(m -> m.toString(), m -> m, mock(KVSerde.class), testRepartitionedStreamName);
-      }, getConfig());
+      MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(m -> m.toString(), m -> m, mock(KVSerde.class), testRepartitionedStreamName);
+    }, getConfig());
     OperatorSpecGraph specGraph = streamAppDesc.getOperatorSpecGraph();
     OperatorSpecGraph clonedGraph = specGraph.clone();
     OperatorSpecTestUtils.assertClonedGraph(specGraph, clonedGraph);
@@ -128,36 +128,36 @@ public class TestPartitionByOperatorSpec {
   public void testScheduledFunctionAsKeyFn() {
     ScheduledMapFn keyFn = new ScheduledMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testWatermarkFunctionAsKeyFn() {
     WatermarkMapFn keyFn = new WatermarkMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testScheduledFunctionAsValueFn() {
     ScheduledMapFn valueFn = new ScheduledMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testWatermarkFunctionAsValueFn() {
     WatermarkMapFn valueFn = new WatermarkMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   private Config getConfig() {
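
The partitionBy call sites re-indented above can be hard to read out of hunk context. A hedged sketch of the same API written as a standalone application; the system, stream, factory, and operator names here are invented for illustration:

    import org.apache.samza.application.StreamApplication;
    import org.apache.samza.application.descriptors.StreamApplicationDescriptor;
    import org.apache.samza.operators.MessageStream;
    import org.apache.samza.operators.functions.MapFunction;
    import org.apache.samza.serializers.KVSerde;
    import org.apache.samza.serializers.NoOpSerde;
    import org.apache.samza.system.descriptors.GenericInputDescriptor;
    import org.apache.samza.system.descriptors.GenericSystemDescriptor;

    public class PartitionByExample implements StreamApplication {
      @Override
      public void describe(StreamApplicationDescriptor appDesc) {
        // Invented system/stream/factory names, for illustration only.
        GenericSystemDescriptor system =
            new GenericSystemDescriptor("example-system", "org.example.ExampleSystemFactory");
        GenericInputDescriptor<Object> input =
            system.getInputDescriptor("example-input", new NoOpSerde<>());
        MessageStream<Object> stream = appDesc.getInputStream(input);

        MapFunction<Object, String> keyFn = m -> m.toString();
        MapFunction<Object, Object> valueFn = m -> m;
        // Repartition by the extracted key; "repartition-1" is the operator id.
        stream.partitionBy(keyFn, valueFn,
            KVSerde.of(new NoOpSerde<>(), new NoOpSerde<>()), "repartition-1");
      }
    }
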
diff --git a/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java b/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java
index 0978738..bb601ce 100644
--- a/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java
+++ b/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java
@@ -124,19 +124,17 @@ public class TestStreamProcessor {
     SamzaContainer createSamzaContainer(String processorId, JobModel jobModel) {
       if (container == null) {
         RunLoop mockRunLoop = mock(RunLoop.class);
-        doAnswer(invocation ->
-          {
-            runLoopStartForMain.countDown();
-            containerStop.await();
-            Thread.sleep(this.runLoopShutdownDuration.toMillis());
-            return null;
-          }).when(mockRunLoop).run();
-
-        Mockito.doAnswer(invocation ->
-          {
-            containerStop.countDown();
-            return null;
-          }).when(mockRunLoop).shutdown();
+        doAnswer(invocation -> {
+          runLoopStartForMain.countDown();
+          containerStop.await();
+          Thread.sleep(this.runLoopShutdownDuration.toMillis());
+          return null;
+        }).when(mockRunLoop).run();
+
+        Mockito.doAnswer(invocation -> {
+          containerStop.countDown();
+          return null;
+        }).when(mockRunLoop).shutdown();
         container = StreamProcessorTestUtils.getDummyContainer(mockRunLoop, Mockito.mock(StreamTask.class));
       }
       return container;
@@ -198,29 +196,26 @@ public class TestStreamProcessor {
         null);
 
     final CountDownLatch coordinatorStop = new CountDownLatch(1);
-    final Thread jcThread = new Thread(() ->
-      {
-        try {
-          processor.jobCoordinatorListener.onJobModelExpired();
-          processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
-          coordinatorStop.await();
-          processor.jobCoordinatorListener.onCoordinatorStop();
-        } catch (InterruptedException e) {
-          e.printStackTrace();
-        }
-      });
+    final Thread jcThread = new Thread(() -> {
+      try {
+        processor.jobCoordinatorListener.onJobModelExpired();
+        processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
+        coordinatorStop.await();
+        processor.jobCoordinatorListener.onCoordinatorStop();
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+    });
 
-    doAnswer(invocation ->
-      {
-        coordinatorStop.countDown();
-        return null;
-      }).when(mockJobCoordinator).stop();
+    doAnswer(invocation -> {
+      coordinatorStop.countDown();
+      return null;
+    }).when(mockJobCoordinator).stop();
 
-    doAnswer(invocation ->
-      {
-        jcThread.start();
-        return null;
-      }).when(mockJobCoordinator).start();
+    doAnswer(invocation -> {
+      jcThread.start();
+      return null;
+    }).when(mockJobCoordinator).start();
 
     processor.start();
     processorListenerStart.await(10, TimeUnit.SECONDS);
@@ -277,29 +272,29 @@ public class TestStreamProcessor {
         Duration.of(1, ChronoUnit.SECONDS));
 
     Thread jcThread = new Thread(() -> {
-        // gets processor into rebalance mode so onNewJobModel creates a new container
-        processor.jobCoordinatorListener.onJobModelExpired();
-        processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
-        try {
-          // wait for the run loop to be ready before triggering rebalance
-          processor.runLoopStartForMain.await();
-        } catch (InterruptedException e) {
-          e.printStackTrace();
-        }
-        processor.jobCoordinatorListener.onJobModelExpired();
-      });
+      // gets processor into rebalance mode so onNewJobModel creates a new container
+      processor.jobCoordinatorListener.onJobModelExpired();
+      processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
+      try {
+        // wait for the run loop to be ready before triggering rebalance
+        processor.runLoopStartForMain.await();
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+      processor.jobCoordinatorListener.onJobModelExpired();
+    });
     doAnswer(invocation -> {
-        jcThread.start();
-        return null;
-      }).when(mockJobCoordinator).start();
+      jcThread.start();
+      return null;
+    }).when(mockJobCoordinator).start();
 
     // ensure that the coordinator stop occurred before checking the exception being thrown
     CountDownLatch coordinatorStop = new CountDownLatch(1);
     doAnswer(invocation -> {
-        processor.jobCoordinatorListener.onCoordinatorStop();
-        coordinatorStop.countDown();
-        return null;
-      }).when(mockJobCoordinator).stop();
+      processor.jobCoordinatorListener.onCoordinatorStop();
+      coordinatorStop.countDown();
+      return null;
+    }).when(mockJobCoordinator).stop();
 
     processor.start();
 
@@ -324,16 +319,15 @@ public class TestStreamProcessor {
     AtomicReference<Throwable> actualThrowable = new AtomicReference<>();
     final CountDownLatch runLoopStartedLatch = new CountDownLatch(1);
     RunLoop failingRunLoop = mock(RunLoop.class);
-    doAnswer(invocation ->
-      {
-        try {
-          runLoopStartedLatch.countDown();
-          throw expectedThrowable;
-        } catch (InterruptedException ie) {
-          ie.printStackTrace();
-        }
-        return null;
-      }).when(failingRunLoop).run();
+    doAnswer(invocation -> {
+      try {
+        runLoopStartedLatch.countDown();
+        throw expectedThrowable;
+      } catch (InterruptedException ie) {
+        ie.printStackTrace();
+      }
+      return null;
+    }).when(failingRunLoop).run();
 
     SamzaContainer mockContainer = StreamProcessorTestUtils.getDummyContainer(failingRunLoop, mock(StreamTask.class));
     final CountDownLatch processorListenerFailed = new CountDownLatch(1);
@@ -369,27 +363,24 @@ public class TestStreamProcessor {
         mockContainer);
 
     final CountDownLatch coordinatorStop = new CountDownLatch(1);
-    doAnswer(invocation ->
-      {
-        coordinatorStop.countDown();
-        return null;
-      }).when(mockJobCoordinator).stop();
+    doAnswer(invocation -> {
+      coordinatorStop.countDown();
+      return null;
+    }).when(mockJobCoordinator).stop();
 
-    doAnswer(invocation ->
-      {
-        new Thread(() ->
-          {
-            try {
-              processor.jobCoordinatorListener.onJobModelExpired();
-              processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
-              coordinatorStop.await();
-              processor.jobCoordinatorListener.onCoordinatorStop();
-            } catch (InterruptedException e) {
-              e.printStackTrace();
-            }
-          }).start();
-        return null;
-      }).when(mockJobCoordinator).start();
+    doAnswer(invocation -> {
+      new Thread(() -> {
+        try {
+          processor.jobCoordinatorListener.onJobModelExpired();
+          processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
+          coordinatorStop.await();
+          processor.jobCoordinatorListener.onCoordinatorStop();
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
+      }).start();
+      return null;
+    }).when(mockJobCoordinator).start();
 
     processor.start();
 
@@ -517,11 +508,11 @@ public class TestStreamProcessor {
      */
 
     Mockito.when(executorService.shutdownNow()).thenAnswer(ctx -> {
-        if (!failContainerInterrupt) {
-          shutdownLatch.countDown();
-        }
-        return null;
-      });
+      if (!failContainerInterrupt) {
+        shutdownLatch.countDown();
+      }
+      return null;
+    });
     Mockito.when(executorService.isShutdown()).thenReturn(true);
 
     streamProcessor.state = State.IN_REBALANCE;
@@ -617,8 +608,8 @@ public class TestStreamProcessor {
     AtomicReference<MockStreamProcessorLifecycleListener> mockListener = new AtomicReference<>();
     StreamProcessor streamProcessor =
         new StreamProcessor("TestProcessorId", mock(Config.class), new HashMap<>(), mock(TaskFactory.class),
-            Optional.empty(), Optional.empty(), Optional.empty(),
-            sp -> mockListener.updateAndGet(old -> new MockStreamProcessorLifecycleListener(sp)),
+            Optional.empty(), Optional.empty(), Optional.empty(), sp ->
+            mockListener.updateAndGet(old -> new MockStreamProcessorLifecycleListener(sp)),
             mock(JobCoordinator.class), Mockito.mock(MetadataStore.class));
     assertEquals(streamProcessor, mockListener.get().processor);
   }
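
The re-indented blocks throughout this file share one Mockito idiom: doAnswer plus CountDownLatch to coordinate a mocked run loop with the test thread. A self-contained sketch under invented names (Worker is a stand-in interface, not a Samza type):

    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.util.concurrent.CountDownLatch;

    public class DoAnswerLatchExample {
      interface Worker {
        void run();
        void shutdown();
      }

      public static Worker stubbedWorker(CountDownLatch started, CountDownLatch stop) {
        Worker worker = mock(Worker.class);
        doAnswer(invocation -> {
          started.countDown();  // signal that the worker entered run()
          stop.await();         // block until the test permits shutdown
          return null;
        }).when(worker).run();
        doAnswer(invocation -> {
          stop.countDown();     // unblock run() when shutdown is requested
          return null;
        }).when(worker).shutdown();
        return worker;
      }
    }
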
diff --git a/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java b/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java
index 777929a..4bd41d0 100644
--- a/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java
+++ b/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java
@@ -78,10 +78,10 @@ public class TestClusterBasedProcessorLifecycleListener {
   @Test
   public void testShutdownHookInvokesShutdownHookCallback() {
     doAnswer(invocation -> {
-        // Simulate call to container.shutdown()
-        clusterBasedProcessorLifecycleListener.afterStop();
-        return null;
-      }).when(mockShutdownHookCallback).run();
+      // Simulate call to container.shutdown()
+      clusterBasedProcessorLifecycleListener.afterStop();
+      return null;
+    }).when(mockShutdownHookCallback).run();
 
     // call beforeStart to setup shutdownHook
     clusterBasedProcessorLifecycleListener.beforeStart();
diff --git a/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java b/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java
index cef7906..344f082 100644
--- a/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java
+++ b/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java
@@ -118,13 +118,12 @@ public class TestLocalApplicationRunner {
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -157,13 +156,12 @@ public class TestLocalApplicationRunner {
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     doReturn(sp).when(runner).createStreamProcessor(anyObject(), anyObject(),
         captor.capture(), eq(Optional.empty()), any(CoordinatorStreamStore.class));
@@ -195,13 +193,12 @@ public class TestLocalApplicationRunner {
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -233,13 +230,12 @@ public class TestLocalApplicationRunner {
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -269,10 +265,9 @@ public class TestLocalApplicationRunner {
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        throw new Exception("test failure");
-      }).when(sp).start();
+    doAnswer(i -> {
+      throw new Exception("test failure");
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -310,19 +305,17 @@ public class TestLocalApplicationRunner {
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      return null;
+    }).when(sp).start();
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStop();
-        return null;
-      }).when(sp).stop();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStop();
+      return null;
+    }).when(sp).stop();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -354,19 +347,17 @@ public class TestLocalApplicationRunner {
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        return null;
-      }).when(sp).start();
-
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStop();
-        return null;
-      }).when(sp).stop();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      return null;
+    }).when(sp).start();
+
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStop();
+      return null;
+    }).when(sp).stop();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
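
The stubs re-indented in this file all revolve around capturing a listener factory with an ArgumentCaptor and invoking it from a stubbed start()/stop(). A hedged sketch of that pattern under invented names ("service" is assumed to be a Mockito mock on which register(...) was already called):

    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.verify;

    import org.mockito.ArgumentCaptor;

    public class CaptorExample {
      interface Listener {
        void afterStart();
        void afterStop();
      }

      interface ListenerFactory {
        Listener create();
      }

      interface Service {
        void register(ListenerFactory factory);
        void start();
      }

      // Capture the factory that production code registered, then have the
      // stubbed start() build a listener from it and fire the callbacks.
      public static void stubStart(Service service) {
        ArgumentCaptor<ListenerFactory> captor = ArgumentCaptor.forClass(ListenerFactory.class);
        verify(service).register(captor.capture());
        doAnswer(invocation -> {
          Listener listener = captor.getValue().create();
          listener.afterStart();
          listener.afterStop();
          return null;
        }).when(service).start();
      }
    }
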
diff --git a/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java b/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java
index 4fd3dcf..008f2f6 100644
--- a/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java
+++ b/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java
@@ -44,18 +44,18 @@ public class TestEpochTimeScheduler {
   private ScheduledExecutorService createExecutorService() {
     ScheduledExecutorService service = mock(ScheduledExecutorService.class);
     when(service.schedule((Runnable) anyObject(), anyLong(), anyObject())).thenAnswer(invocation -> {
-        Object[] args = invocation.getArguments();
-        Runnable runnable = (Runnable) args[0];
-        runnable.run();
-        return mock(ScheduledFuture.class);
-      });
+      Object[] args = invocation.getArguments();
+      Runnable runnable = (Runnable) args[0];
+      runnable.run();
+      return mock(ScheduledFuture.class);
+    });
     return service;
   }
 
   private void fireTimers(EpochTimeScheduler factory) {
     factory.removeReadyTimers().entrySet().forEach(entry -> {
-        entry.getValue().onCallback(entry.getKey().getKey(), mock(MessageCollector.class), mock(TaskCoordinator.class));
-      });
+      entry.getValue().onCallback(entry.getKey().getKey(), mock(MessageCollector.class), mock(TaskCoordinator.class));
+    });
   }
 
   @Test
@@ -70,11 +70,11 @@ public class TestEpochTimeScheduler {
     when(executor.schedule((Runnable) anyObject(), anyLong(), anyObject()))
         .thenReturn(mockScheduledFuture1)
         .thenAnswer(invocation -> {
-            Object[] args = invocation.getArguments();
-            Runnable runnable = (Runnable) args[0];
-            runnable.run();
-            return mockScheduledFuture2;
-          });
+          Object[] args = invocation.getArguments();
+          Runnable runnable = (Runnable) args[0];
+          runnable.run();
+          return mockScheduledFuture2;
+        });
 
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(executor);
     long timestamp = System.currentTimeMillis() + 10000;
@@ -144,11 +144,11 @@ public class TestEpochTimeScheduler {
     when(executor.schedule((Runnable) anyObject(), anyLong(), anyObject()))
         .thenReturn(mockScheduledFuture1)
         .thenAnswer(invocation -> {
-            Object[] args = invocation.getArguments();
-            Runnable runnable = (Runnable) args[0];
-            runnable.run();
-            return mockScheduledFuture2;
-          });
+          Object[] args = invocation.getArguments();
+          Runnable runnable = (Runnable) args[0];
+          runnable.run();
+          return mockScheduledFuture2;
+        });
 
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(executor);
     long timestamp = System.currentTimeMillis() + 10000;
@@ -182,8 +182,8 @@ public class TestEpochTimeScheduler {
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     List<String> results = new ArrayList<>();
     scheduler.setTimer("single-timer", 1, (key, collector, coordinator) -> {
-        results.add(key);
-      });
+      results.add(key);
+    });
 
     fireTimers(scheduler);
 
@@ -196,14 +196,14 @@ public class TestEpochTimeScheduler {
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     List<String> results = new ArrayList<>();
     scheduler.setTimer("multiple-timer-3", 3, (key, collector, coordinator) -> {
-        results.add(key + ":3");
-      });
+      results.add(key + ":3");
+    });
     scheduler.setTimer("multiple-timer-2", 2, (key, collector, coordinator) -> {
-        results.add(key + ":2");
-      });
+      results.add(key + ":2");
+    });
     scheduler.setTimer("multiple-timer-1", 1, (key, collector, coordinator) -> {
-        results.add(key + ":1");
-      });
+      results.add(key + ":1");
+    });
 
     fireTimers(scheduler);
 
@@ -221,13 +221,13 @@ public class TestEpochTimeScheduler {
 
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     scheduler.setTimer(key1, 2, (key, collector, coordinator) -> {
-        assertEquals(key, key1);
-        results.add("key1:2");
-      });
+      assertEquals(key, key1);
+      results.add("key1:2");
+    });
     scheduler.setTimer(key2, 1, (key, collector, coordinator) -> {
-        assertEquals(key, key2);
-        results.add("key2:1");
-      });
+      assertEquals(key, key2);
+      results.add("key2:1");
+    });
 
     fireTimers(scheduler);
 
@@ -244,13 +244,13 @@ public class TestEpochTimeScheduler {
 
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     scheduler.setTimer(key1, 1, (key, collector, coordinator) -> {
-        assertEquals(key, key1);
-        results.add("key:1");
-      });
+      assertEquals(key, key1);
+      results.add("key:1");
+    });
     scheduler.setTimer(key2, 2, (key, collector, coordinator) -> {
-        assertEquals(key.longValue(), Long.MAX_VALUE);
-        results.add(Long.MAX_VALUE + ":2");
-      });
+      assertEquals(key.longValue(), Long.MAX_VALUE);
+      results.add(Long.MAX_VALUE + ":2");
+    });
 
     fireTimers(scheduler);
 
@@ -269,8 +269,8 @@ public class TestEpochTimeScheduler {
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(service);
     List<String> results = new ArrayList<>();
     scheduler.setTimer("timer", 1, (key, collector, coordinator) -> {
-        results.add(key);
-      });
+      results.add(key);
+    });
 
     scheduler.deleteTimer("timer");
 
@@ -285,11 +285,11 @@ public class TestEpochTimeScheduler {
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     List<String> results = new ArrayList<>();
     scheduler.registerListener(() -> {
-        results.add("timer-listener");
-      });
+      results.add("timer-listener");
+    });
 
     scheduler.setTimer("timer-listener", 1, (key, collector, coordinator) -> {
-      });
+    });
 
     fireTimers(scheduler);
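
The createExecutorService() helper re-indented above stubs schedule() to run submitted tasks inline, so timer callbacks can be asserted synchronously. A hedged standalone sketch of that idiom (class and method names invented):

    import static org.mockito.Mockito.any;
    import static org.mockito.Mockito.anyLong;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class InlineSchedulerExample {
      public static ScheduledExecutorService inlineScheduler() {
        ScheduledExecutorService service = mock(ScheduledExecutorService.class);
        when(service.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)))
            .thenAnswer(invocation -> {
              Runnable task = (Runnable) invocation.getArguments()[0];
              task.run(); // execute inline instead of waiting for the delay
              return mock(ScheduledFuture.class);
            });
        return service;
      }
    }
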
 
diff --git a/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputHandler.java b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputHandler.java
index 75562db..656b2ef 100644
--- a/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputHandler.java
+++ b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputHandler.java
@@ -68,7 +68,7 @@ public class TestTaskSideInputHandler {
         .collect(Collectors.toSet());
     Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = ssps.stream()
         .collect(Collectors.toMap(SystemStreamPartition::getPartition,
-            x -> new SystemStreamMetadata.SystemStreamPartitionMetadata(null, "1", "2")));
+          x -> new SystemStreamMetadata.SystemStreamPartitionMetadata(null, "1", "2")));
 
 
     TaskSideInputHandler handler = new MockTaskSideInputHandlerBuilder(taskName, TaskMode.Active)
@@ -80,10 +80,10 @@ public class TestTaskSideInputHandler {
     handler.init();
 
     ssps.forEach(ssp -> {
-        String startingOffset = handler.getStartingOffset(
-            new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, ssp.getPartition()));
-        Assert.assertNull("Starting offset should be null", startingOffset);
-      });
+      String startingOffset = handler.getStartingOffset(
+          new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, ssp.getPartition()));
+      Assert.assertNull("Starting offset should be null", startingOffset);
+    });
   }
 
   @Test
@@ -103,17 +103,17 @@ public class TestTaskSideInputHandler {
     // set up file and oldest offsets. for even partitions, fileOffsets will be larger; for odd partitions oldestOffsets will be larger
     Map<SystemStreamPartition, String> fileOffsets = ssps.stream()
         .collect(Collectors.toMap(Function.identity(), ssp -> {
-            int partitionId = ssp.getPartition().getPartitionId();
-            int offset = partitionId % 2 == 0 ? partitionId + 10 : partitionId;
-            return String.valueOf(offset);
-          }));
+          int partitionId = ssp.getPartition().getPartitionId();
+          int offset = partitionId % 2 == 0 ? partitionId + 10 : partitionId;
+          return String.valueOf(offset);
+        }));
     Map<SystemStreamPartition, String> oldestOffsets = ssps.stream()
         .collect(Collectors.toMap(Function.identity(), ssp -> {
-            int partitionId = ssp.getPartition().getPartitionId();
-            int offset = partitionId % 2 == 0 ? partitionId : partitionId + 10;
+          int partitionId = ssp.getPartition().getPartitionId();
+          int offset = partitionId % 2 == 0 ? partitionId : partitionId + 10;
 
-            return String.valueOf(offset);
-          }));
+          return String.valueOf(offset);
+        }));
 
     doCallRealMethod().when(handler).getStartingOffsets(fileOffsets, oldestOffsets);
 
@@ -121,14 +121,14 @@ public class TestTaskSideInputHandler {
 
     assertTrue("Failed to get starting offsets for all ssps", startingOffsets.size() == 5);
     startingOffsets.forEach((ssp, offset) -> {
-        int partitionId = ssp.getPartition().getPartitionId();
-        String expectedOffset = partitionId % 2 == 0
-            // 1 + fileOffset
-            ? getOffsetAfter(String.valueOf(ssp.getPartition().getPartitionId() + 10))
-            // oldestOffset
-            : String.valueOf(ssp.getPartition().getPartitionId() + 10);
-        assertEquals("Larger of fileOffsets and oldestOffsets should always be chosen", expectedOffset, offset);
-      });
+      int partitionId = ssp.getPartition().getPartitionId();
+      String expectedOffset = partitionId % 2 == 0
+          // 1 + fileOffset
+          ? getOffsetAfter(String.valueOf(ssp.getPartition().getPartitionId() + 10))
+          // oldestOffset
+          : String.valueOf(ssp.getPartition().getPartitionId() + 10);
+      assertEquals("Larger of fileOffsets and oldestOffsets should always be chosen", expectedOffset, offset);
+    });
   }
 
   private static final class MockTaskSideInputHandlerBuilder {
@@ -154,19 +154,18 @@ public class TestTaskSideInputHandler {
     private void initializeMocks() {
       SystemAdmin admin = mock(SystemAdmin.class);
       doAnswer(invocation -> {
-          String offset1 = invocation.getArgumentAt(0, String.class);
-          String offset2 = invocation.getArgumentAt(1, String.class);
+        String offset1 = invocation.getArgumentAt(0, String.class);
+        String offset2 = invocation.getArgumentAt(1, String.class);
 
-          return Long.compare(Long.parseLong(offset1), Long.parseLong(offset2));
-        }).when(admin).offsetComparator(any(), any());
+        return Long.compare(Long.parseLong(offset1), Long.parseLong(offset2));
+      }).when(admin).offsetComparator(any(), any());
       doAnswer(invocation -> {
-          Map<SystemStreamPartition, String> sspToOffsets = invocation.getArgumentAt(0, Map.class);
+        Map<SystemStreamPartition, String> sspToOffsets = invocation.getArgumentAt(0, Map.class);
 
-          return sspToOffsets.entrySet()
-              .stream()
-              .collect(Collectors.toMap(Map.Entry::getKey,
-                  entry -> getOffsetAfter(entry.getValue())));
-        }).when(admin).getOffsetsAfter(any());
+        return sspToOffsets.entrySet()
+            .stream()
+            .collect(Collectors.toMap(Map.Entry::getKey, entry -> getOffsetAfter(entry.getValue())));
+      }).when(admin).getOffsetsAfter(any());
       doReturn(admin).when(systemAdmins).getSystemAdmin(TEST_SYSTEM);
       doReturn(ScalaJavaUtil.toScalaMap(new HashMap<>())).when(streamMetadataCache).getStreamMetadata(any(), anyBoolean());
     }
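
The getOffsetsAfter stub re-indented in initializeMocks() above maps each partition's numeric string offset to its successor. A hedged standalone version of it (SystemAdmin is the real Samza interface; the helper name and increment-by-one behavior mirror the test's getOffsetAfter assumption):

    import static org.mockito.Mockito.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.util.Map;
    import java.util.stream.Collectors;
    import org.apache.samza.system.SystemAdmin;
    import org.apache.samza.system.SystemStreamPartition;

    public class OffsetsAfterStubExample {
      @SuppressWarnings("unchecked")
      public static SystemAdmin incrementingAdmin() {
        SystemAdmin admin = mock(SystemAdmin.class);
        doAnswer(invocation -> {
          Map<SystemStreamPartition, String> offsets =
              (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
          // Advance every offset by one, keeping the string representation.
          return offsets.entrySet().stream()
              .collect(Collectors.toMap(Map.Entry::getKey,
                  entry -> Long.toString(Long.parseLong(entry.getValue()) + 1)));
        }).when(admin).getOffsetsAfter(any());
        return admin;
      }
    }
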
diff --git a/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java
index 9c41e85..0412154 100644
--- a/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java
+++ b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java
@@ -178,9 +178,9 @@ public class TestTaskSideInputStorageManager {
 
     Map<SystemStreamPartition, String> fileOffsets = testSideInputStorageManager.getFileOffsets();
     ssps.forEach(ssp -> {
-        assertTrue("Failed to get offset for ssp: " + ssp.toString() + " from file.", fileOffsets.containsKey(ssp));
-        assertEquals("Mismatch between last processed offset and file offset.", fileOffsets.get(ssp), offset);
-      });
+      assertTrue("Failed to get offset for ssp: " + ssp.toString() + " from file.", fileOffsets.containsKey(ssp));
+      assertEquals("Mismatch between last processed offset and file offset.", fileOffsets.get(ssp), offset);
+    });
   }
 
   private void initializeSideInputStorageManager(TaskSideInputStorageManager testSideInputStorageManager) {
diff --git a/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java b/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java
index c37cca3..2bdd6c3 100644
--- a/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java
+++ b/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java
@@ -205,10 +205,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     File dummyCurrentDir = new File("currentDir");
     File dummyCheckpointDir = new File("checkpointDir1");
@@ -273,10 +273,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -339,10 +339,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -406,10 +406,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -474,10 +474,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -546,10 +546,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -610,10 +610,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -673,10 +673,10 @@ public class TestTransactionalStateTaskRestoreManager {
         .thenReturn(mockCurrentStoreDir);
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -754,10 +754,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -837,10 +837,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -921,10 +921,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1006,10 +1006,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1091,10 +1091,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1185,10 +1185,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1280,10 +1280,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1367,13 +1367,13 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            if (offset1 == null || offset2 == null) {
-              return -1;
-            }
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          if (offset1 == null || offset2 == null) {
+            return -1;
+          }
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1462,13 +1462,13 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            if (offset1 == null || offset2 == null) {
-              return -1;
-            }
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          if (offset1 == null || offset2 == null) {
+            return -1;
+          }
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1555,10 +1555,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1646,10 +1646,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1736,10 +1736,10 @@ public class TestTransactionalStateTaskRestoreManager {
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1897,17 +1897,17 @@ public class TestTransactionalStateTaskRestoreManager {
     when(mockSystemAdmins.getSystemAdmin(eq(changelogSystemName))).thenReturn(mockSystemAdmin);
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
     Mockito.when(mockSystemAdmin.getOffsetsAfter(any()))
         .thenAnswer((Answer<Map<SystemStreamPartition, String>>) invocation -> {
-            Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
-            Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
-            offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
-            return nextOffsets;
-          });
+          Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
+          Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
+          offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
+          return nextOffsets;
+        });
 
     SystemConsumer mockSystemConsumer = mock(SystemConsumer.class);
     Map<String, SystemConsumer> mockStoreConsumers = ImmutableMap.of(
@@ -1962,17 +1962,17 @@ public class TestTransactionalStateTaskRestoreManager {
     when(mockSystemAdmins.getSystemAdmin(eq(changelogSystemName))).thenReturn(mockSystemAdmin);
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
     Mockito.when(mockSystemAdmin.getOffsetsAfter(any()))
         .thenAnswer((Answer<Map<SystemStreamPartition, String>>) invocation -> {
-            Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
-            Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
-            offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
-            return nextOffsets;
-          });
+          Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
+          Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
+          offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
+          return nextOffsets;
+        });
 
     SystemConsumer mockSystemConsumer = mock(SystemConsumer.class);
     Map<String, SystemConsumer> mockStoreConsumers = ImmutableMap.of("store1", mockSystemConsumer);
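
The hunks above only reindent the Mockito answer lambdas; distilled, the stubbing pattern they reformat looks like this (a minimal sketch, assuming the usual static imports from org.mockito.Mockito and org.mockito.Matchers, and numeric string offsets as in the test), with the body two spaces past the statement's indent and the closing brace back at the statement's indent, per the new checkstyle convention:

    SystemAdmin admin = mock(SystemAdmin.class);
    // Compare offsets numerically, matching how the test generates them.
    when(admin.offsetComparator(anyString(), anyString()))
        .thenAnswer((Answer<Integer>) invocation -> {
          long first = Long.parseLong((String) invocation.getArguments()[0]);
          long second = Long.parseLong((String) invocation.getArguments()[1]);
          return Long.compare(first, second);
        });
    // The "next" offset for every partition is simply current + 1.
    when(admin.getOffsetsAfter(any()))
        .thenAnswer((Answer<Map<SystemStreamPartition, String>>) invocation -> {
          Map<SystemStreamPartition, String> current =
              (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
          Map<SystemStreamPartition, String> next = new HashMap<>();
          current.forEach((ssp, offset) -> next.put(ssp, Long.toString(Long.parseLong(offset) + 1)));
          return next;
        });
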
diff --git a/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java b/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java
index 9c8dc58..73cae4e 100644
--- a/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java
+++ b/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java
@@ -54,13 +54,13 @@ public class MockSystemFactory implements SystemFactory {
       public Map<SystemStreamPartition, List<IncomingMessageEnvelope>> poll(Set<SystemStreamPartition> systemStreamPartitions, long timeout) {
         Map<SystemStreamPartition, List<IncomingMessageEnvelope>> retQueues = new HashMap<>();
         systemStreamPartitions.forEach(ssp -> {
-            List<IncomingMessageEnvelope> msgs = MSG_QUEUES.get(ssp);
-            if (msgs == null) {
-              retQueues.put(ssp, new ArrayList<>());
-            } else {
-              retQueues.put(ssp, MSG_QUEUES.remove(ssp));
-            }
-          });
+          List<IncomingMessageEnvelope> msgs = MSG_QUEUES.get(ssp);
+          if (msgs == null) {
+            retQueues.put(ssp, new ArrayList<>());
+          } else {
+            retQueues.put(ssp, MSG_QUEUES.remove(ssp));
+          }
+        });
         return retQueues;
       }
     };
@@ -124,30 +124,32 @@ public class MockSystemFactory implements SystemFactory {
         Map<String, Set<Partition>> partitionMap = MSG_QUEUES.entrySet()
             .stream()
             .filter(entry -> streamNames.contains(entry.getKey().getSystemStream().getStream()))
-            .map(e -> e.getKey()).<Map<String, Set<Partition>>>collect(HashMap::new, (m, ssp) -> {
+            .map(e -> e.getKey())
+            .<Map<String, Set<Partition>>>collect(HashMap::new,
+              (m, ssp) -> {
                 if (m.get(ssp.getStream()) == null) {
                   m.put(ssp.getStream(), new HashSet<>());
                 }
                 m.get(ssp.getStream()).add(ssp.getPartition());
               }, (m1, m2) -> {
                 m2.forEach((k, v) -> {
-                    if (m1.get(k) == null) {
-                      m1.put(k, v);
-                    } else {
-                      m1.get(k).addAll(v);
-                    }
-                  });
+                  if (m1.get(k) == null) {
+                    m1.put(k, v);
+                  } else {
+                    m1.get(k).addAll(v);
+                  }
+                });
               });
 
         partitionMap.forEach((k, v) -> {
-            Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetaMap =
-                v.stream().<Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata>>collect(HashMap::new,
-                  (m, p) -> {
-                    m.put(p, new SystemStreamMetadata.SystemStreamPartitionMetadata("", "", ""));
-                  }, (m1, m2) -> m1.putAll(m2));
-
-            metadataMap.put(k, new SystemStreamMetadata(k, partitionMetaMap));
-          });
+          Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetaMap =
+              v.stream().<Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata>>collect(HashMap::new,
+                (m, p) -> {
+                  m.put(p, new SystemStreamMetadata.SystemStreamPartitionMetadata("", "", ""));
+                }, (m1, m2) -> m1.putAll(m2));
+
+          metadataMap.put(k, new SystemStreamMetadata(k, partitionMetaMap));
+        });
 
         return metadataMap;
       }
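
The reindented three-argument collect(HashMap::new, accumulator, combiner) above builds a stream-name-to-partitions map by hand. As a hedged aside (an alternative sketch, not part of this commit), Collectors.groupingBy with a mapping downstream expresses the same grouping more directly:

    Map<String, Set<Partition>> partitionMap = MSG_QUEUES.keySet().stream()
        .filter(ssp -> streamNames.contains(ssp.getSystemStream().getStream()))
        .collect(Collectors.groupingBy(
            ssp -> ssp.getStream(),
            Collectors.mapping(ssp -> ssp.getPartition(), Collectors.toSet())));
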
diff --git a/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java b/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java
index efe09d1..f08d069 100644
--- a/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java
+++ b/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java
@@ -241,12 +241,12 @@ public class TestSSPMetadataCache {
     SSPMetadataCache cache = buildSSPMetadataCache(ssps);
     ExecutorService executorService = Executors.newFixedThreadPool(10);
     when(systemAdmin.getSSPMetadata(ssps)).thenAnswer(invocation -> {
-        // have the admin call wait so that it forces the threads to overlap on the lock
-        Thread.sleep(500);
-        return IntStream.range(0, numPartitions)
-            .boxed()
-            .collect(Collectors.toMap(TestSSPMetadataCache::buildSSP, i -> sspMetadata((long) i)));
-      });
+      // have the admin call wait so that it forces the threads to overlap on the lock
+      Thread.sleep(500);
+      return IntStream.range(0, numPartitions)
+          .boxed()
+          .collect(Collectors.toMap(TestSSPMetadataCache::buildSSP, i -> sspMetadata((long) i)));
+    });
 
     // send concurrent requests for metadata
     List<Future<SystemStreamMetadata.SystemStreamPartitionMetadata>> getMetadataFutures =
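
The Thread.sleep(500) in the stubbed answer only pays off if the lookups genuinely overlap. Roughly, the rest of the test fans the requests out over the pool and then blocks on the futures; a sketch of that shape (the getMetadata call and the timeout are illustrative assumptions, not code from this commit):

    List<Future<SystemStreamMetadata.SystemStreamPartitionMetadata>> futures = new ArrayList<>();
    for (int i = 0; i < numPartitions; i++) {
      SystemStreamPartition ssp = buildSSP(i);
      // Each submission may hit the cache while another thread holds its lock.
      futures.add(executorService.submit(() -> cache.getMetadata(ssp)));
    }
    for (Future<SystemStreamMetadata.SystemStreamPartitionMetadata> future : futures) {
      // get() with a timeout fails the test instead of hanging the build;
      // the enclosing test method must declare "throws Exception".
      assertNotNull(future.get(30, TimeUnit.SECONDS));
    }
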
diff --git a/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java b/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java
index e436a06..04656a5 100644
--- a/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java
+++ b/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java
@@ -118,21 +118,21 @@ public class TestCachingTable {
     final ReadWriteTable cacheTable = mock(ReadWriteTable.class);
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        String value = invocation.getArgumentAt(1, String.class);
-        cacheStore.put(key, value);
-        return null;
-      }).when(cacheTable).put(any(), any());
+      String key = invocation.getArgumentAt(0, String.class);
+      String value = invocation.getArgumentAt(1, String.class);
+      cacheStore.put(key, value);
+      return null;
+    }).when(cacheTable).put(any(), any());
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        return cacheStore.get(key);
-      }).when(cacheTable).get(any());
+      String key = invocation.getArgumentAt(0, String.class);
+      return cacheStore.get(key);
+    }).when(cacheTable).get(any());
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        return cacheStore.remove(key);
-      }).when(cacheTable).delete(any());
+      String key = invocation.getArgumentAt(0, String.class);
+      return cacheStore.remove(key);
+    }).when(cacheTable).delete(any());
 
     return Pair.of(cacheTable, cacheStore);
   }
@@ -171,24 +171,24 @@ public class TestCachingTable {
     final ReadWriteTable realTable = mock(ReadWriteTable.class);
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        return CompletableFuture.completedFuture("test-data-" + key);
-      }).when(realTable).getAsync(any());
+      String key = invocation.getArgumentAt(0, String.class);
+      return CompletableFuture.completedFuture("test-data-" + key);
+    }).when(realTable).getAsync(any());
 
     doReturn(CompletableFuture.completedFuture(null)).when(realTable).putAsync(any(), any());
 
     doAnswer(invocation -> {
-        String tableId = invocation.getArgumentAt(0, String.class);
-        if (tableId.equals("realTable")) {
-          // cache
-          return realTable;
-        } else if (tableId.equals("cacheTable")) {
-          return cacheTable;
-        }
-
-        Assert.fail();
-        return null;
-      }).when(context.getTaskContext()).getTable(anyString());
+      String tableId = invocation.getArgumentAt(0, String.class);
+      if (tableId.equals("realTable")) {
+        // cache
+        return realTable;
+      } else if (tableId.equals("cacheTable")) {
+        return cacheTable;
+      }
+
+      Assert.fail();
+      return null;
+    }).when(context.getTaskContext()).getTable(anyString());
 
     when(context.getContainerContext().getContainerMetricsRegistry()).thenReturn(new NoOpMetricsRegistry());
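
The doAnswer trio above is the usual way to back a mocked table with a real map, so that reads observe earlier writes. The general shape, reduced to a sketch (backing and table are illustrative names):

    Map<String, String> backing = new HashMap<>();
    ReadWriteTable table = mock(ReadWriteTable.class);
    // Writes land in the real map...
    doAnswer(invocation -> {
      backing.put(invocation.getArgumentAt(0, String.class),
          invocation.getArgumentAt(1, String.class));
      return null;
    }).when(table).put(any(), any());
    // ...and reads and deletes are served from it, so the mock behaves statefully.
    doAnswer(invocation -> backing.get(invocation.getArgumentAt(0, String.class)))
        .when(table).get(any());
    doAnswer(invocation -> backing.remove(invocation.getArgumentAt(0, String.class)))
        .when(table).delete(any());
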
 
diff --git a/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java b/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java
index 718aa2c..ed49cf8 100644
--- a/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java
+++ b/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java
@@ -194,9 +194,9 @@ public class TestRemoteTable {
 
     CompletableFuture.allOf(future1, future2)
         .thenAccept(u -> {
-            Assert.assertEquals(future1.join(), "bar1");
-            Assert.assertEquals(future2.join(), "bar1");
-          });
+          Assert.assertEquals(future1.join(), "bar1");
+          Assert.assertEquals(future2.join(), "bar1");
+        });
   }
 
   public void doTestRead(boolean sync, boolean error) {
@@ -556,10 +556,10 @@ public class TestRemoteTable {
     Thread testThread = Thread.currentThread();
 
     table.getAsync("foo").thenAccept(result -> {
-        Assert.assertEquals("bar", result);
-        // Must be executed on the executor thread
-        Assert.assertNotSame(testThread, Thread.currentThread());
-      });
+      Assert.assertEquals("bar", result);
+      // Must be executed on the executor thread
+      Assert.assertNotSame(testThread, Thread.currentThread());
+    });
   }
 
   @Test
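
One thing the reindented assertion block above does not change: assertions inside thenAccept run on the future's completion path, so an AssertionError raised there completes the returned stage exceptionally rather than failing the test, unless the composed future is joined. Also, JUnit's assertEquals takes the expected value first, so the arguments above are reversed; harmless while the test passes, but failure messages would be inverted. A safer shape, as a sketch:

    CompletableFuture.allOf(future1, future2)
        .thenAccept(u -> {
          Assert.assertEquals("bar1", future1.join());
          Assert.assertEquals("bar1", future2.join());
        })
        .join(); // surfaces any AssertionError from the callback on the test thread
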
diff --git a/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java b/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java
index ec4307d..f80f623 100644
--- a/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java
+++ b/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java
@@ -188,15 +188,15 @@ public class TestAsyncRetriableTable {
     map.put("foo1", "bar1");
     map.put("foo2", "bar2");
     doAnswer(invocation -> {
-        CompletableFuture<Map<String, String>> future = new CompletableFuture();
-        if (times.get() > 0) {
-          future.complete(map);
-        } else {
-          times.incrementAndGet();
-          future.completeExceptionally(new RuntimeException("test exception"));
-        }
-        return future;
-      }).when(readFn).getAllAsync(anyCollection());
+      CompletableFuture<Map<String, String>> future = new CompletableFuture();
+      if (times.get() > 0) {
+        future.complete(map);
+      } else {
+        times.incrementAndGet();
+        future.completeExceptionally(new RuntimeException("test exception"));
+      }
+      return future;
+    }).when(readFn).getAllAsync(anyCollection());
 
     AsyncReadWriteTable delegate = new AsyncRemoteTable(readFn, null);
     AsyncRetriableTable table = new AsyncRetriableTable("t1", delegate, policy, null, schedExec, readFn, null);
@@ -399,15 +399,15 @@ public class TestAsyncRetriableTable {
 
     AtomicInteger times = new AtomicInteger();
     doAnswer(invocation -> {
-        CompletableFuture<Map<String, String>> future = new CompletableFuture();
-        if (times.get() > 0) {
-          future.complete(null);
-        } else {
-          times.incrementAndGet();
-          future.completeExceptionally(new RuntimeException("test exception"));
-        }
-        return future;
-      }).when(writeFn).putAllAsync(any());
+      CompletableFuture<Map<String, String>> future = new CompletableFuture();
+      if (times.get() > 0) {
+        future.complete(null);
+      } else {
+        times.incrementAndGet();
+        future.completeExceptionally(new RuntimeException("test exception"));
+      }
+      return future;
+    }).when(writeFn).putAllAsync(any());
 
     AsyncReadWriteTable delegate = new AsyncRemoteTable(readFn, writeFn);
     AsyncRetriableTable table = new AsyncRetriableTable("t1", delegate, null, policy, schedExec, readFn, writeFn);
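
Both hunks in this file use the same fail-once-then-succeed idiom to exercise the retry policy exactly once. Isolated into a sketch (the payload and exception message are illustrative):

    AtomicInteger attempts = new AtomicInteger();
    doAnswer(invocation -> {
      CompletableFuture<Map<String, String>> future = new CompletableFuture<>();
      if (attempts.getAndIncrement() == 0) {
        // The first call fails, which should trigger a single retry...
        future.completeExceptionally(new RuntimeException("transient failure"));
      } else {
        // ...and every subsequent call succeeds.
        future.complete(map);
      }
      return future;
    }).when(readFn).getAllAsync(anyCollection());
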
diff --git a/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java b/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java
index 330cab9..2d43c63 100644
--- a/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java
+++ b/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java
@@ -72,9 +72,9 @@ public class TestStreamOperatorTask {
     CountDownLatch failureLatch = new CountDownLatch(1);
 
     doAnswer(ctx -> {
-        failureLatch.countDown();
-        return null;
-      }).when(mockTaskCallback).failure(anyObject());
+      failureLatch.countDown();
+      return null;
+    }).when(mockTaskCallback).failure(anyObject());
 
     operatorTask.processAsync(mock(IncomingMessageEnvelope.class), mockMessageCollector,
         mockTaskCoordinator, mockTaskCallback);
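
The latch-based doAnswer above signals the asynchronous failure callback; its counterpart, typically a few lines later in the test, blocks with a timeout so a missed callback fails fast instead of hanging the build (an illustrative sketch; the timeout value is an assumption):

    assertTrue("timed out waiting for the failure callback",
        failureLatch.await(10, TimeUnit.SECONDS));
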
diff --git a/samza-core/src/test/java/org/apache/samza/util/TestSplitDeploymentUtil.java b/samza-core/src/test/java/org/apache/samza/util/TestSplitDeploymentUtil.java
index 1336190..d1dd8f8 100644
--- a/samza-core/src/test/java/org/apache/samza/util/TestSplitDeploymentUtil.java
+++ b/samza-core/src/test/java/org/apache/samza/util/TestSplitDeploymentUtil.java
@@ -49,11 +49,11 @@ public class TestSplitDeploymentUtil {
     // stub the private static method which is called by reflection
     PowerMockito.doAnswer(invocation -> {
         // make sure the only calls to this method have the expected arguments
-        assertArrayEquals(args, invocation.getArgumentAt(0, String[].class));
-        // checks that the context classloader is set correctly
-        assertEquals(classLoader, Thread.currentThread().getContextClassLoader());
-        return null;
-      }).when(ClusterBasedJobCoordinator.class, "runClusterBasedJobCoordinator", any());
+      assertArrayEquals(args, invocation.getArgumentAt(0, String[].class));
+      // checks that the context classloader is set correctly
+      assertEquals(classLoader, Thread.currentThread().getContextClassLoader());
+      return null;
+    }).when(ClusterBasedJobCoordinator.class, "runClusterBasedJobCoordinator", any());
 
     try {
       SplitDeploymentUtil.runWithClassLoader(classLoader,
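
The .when(Class, "methodName", args) form above is PowerMock's hook for stubbing private static methods. For context, the minimal harness that makes it work looks roughly like this (a sketch; PowerMock needs its runner plus @PrepareForTest so it can rewrite the target class's bytecode):

    @RunWith(PowerMockRunner.class)
    @PrepareForTest(ClusterBasedJobCoordinator.class)
    public class ExamplePowerMockTest {
      @Test
      public void stubsPrivateStatic() throws Exception {
        PowerMockito.doAnswer(invocation -> null)
            .when(ClusterBasedJobCoordinator.class, "runClusterBasedJobCoordinator", any());
        // exercise code that ends up calling the stubbed private static method
      }
    }
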
diff --git a/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java b/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java
index 67b2d45..c63e66b 100644
--- a/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java
+++ b/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java
@@ -59,9 +59,9 @@ public class TestScheduleAfterDebounceTime {
 
     final TestObj testObj = new TestScheduleAfterDebounceTime.TestObj();
     scheduledQueue.scheduleAfterDebounceTime("TEST1", WAIT_TIME, () -> {
-        testObj.inc();
-        latch.countDown();
-      });
+      testObj.inc();
+      latch.countDown();
+    });
     // action is delayed
     Assert.assertEquals(0, testObj.get());
 
@@ -105,14 +105,13 @@ public class TestScheduleAfterDebounceTime {
     final Throwable[] taskCallbackException = new Exception[1];
     ScheduleAfterDebounceTime scheduledQueue = new ScheduleAfterDebounceTime(TEST_PROCESSOR_ID);
     scheduledQueue.setScheduledTaskCallback(throwable -> {
-        taskCallbackException[0] = throwable;
-        latch.countDown();
-      });
+      taskCallbackException[0] = throwable;
+      latch.countDown();
+    });
 
-    scheduledQueue.scheduleAfterDebounceTime("TEST1", WAIT_TIME, () ->
-      {
-        throw new RuntimeException("From the runnable!");
-      });
+    scheduledQueue.scheduleAfterDebounceTime("TEST1", WAIT_TIME, () -> {
+      throw new RuntimeException("From the runnable!");
+    });
 
     final TestObj testObj = new TestObj();
     scheduledQueue.scheduleAfterDebounceTime("TEST2", WAIT_TIME * 2, testObj::inc);
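
The second hunk above shows the whole convention in miniature: the lambda's opening brace moves onto the line with the arrow, the body sits two spaces past the statement that opens it, and the closing "});" returns to the statement's own indent. Distilled (the names are illustrative, not from the repo):

    scheduledQueue.scheduleAfterDebounceTime("KEY", delayMs, () -> {
      doWork();   // body: two spaces past the statement's indent
    });           // closing brace: back at the statement's indent
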
diff --git a/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java b/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java
index d98392d..13fc13b 100644
--- a/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java
+++ b/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java
@@ -542,13 +542,13 @@ public class TestZkUtils {
     ZkUtils zkUtils = new ZkUtils(KEY_BUILDER, zkClient, CONNECTION_TIMEOUT_MS, SESSION_TIMEOUT_MS, new NoOpMetricsRegistry());
 
     Thread threadToInterrupt = new Thread(() -> {
-        try {
-          latch.await();
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-        zkUtils.close();
-      });
+      try {
+        latch.await();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+      zkUtils.close();
+    });
 
     threadToInterrupt.start();
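
The reindented lambda above also happens to demonstrate the standard interruption idiom, worth spelling out as a general sketch (not code added by this commit): a caught InterruptedException clears the thread's interrupt flag, so it is re-asserted before cleanup continues.

    try {
      latch.await();
    } catch (InterruptedException e) {
      // await() cleared the interrupt flag; restore it so callers can still observe it.
      Thread.currentThread().interrupt();
    }
    zkUtils.close(); // cleanup runs either way, with the interrupt status preserved
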
 
diff --git a/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java b/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java
index bdc7e0e..c36a3be 100644
--- a/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java
+++ b/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java
@@ -87,8 +87,8 @@ public class TestContainerStorageManager {
   private void addMockedTask(String taskname, int changelogPartition) {
     TaskInstance mockTaskInstance = mock(TaskInstance.class);
     doAnswer(invocation -> {
-        return new TaskName(taskname);
-      }).when(mockTaskInstance).taskName();
+      return new TaskName(taskname);
+    }).when(mockTaskInstance).taskName();
 
     Gauge testGauge = mock(Gauge.class);
     this.tasks.put(new TaskName(taskname),
@@ -126,33 +126,33 @@ public class TestContainerStorageManager {
     when(mockStorageEngine.getStoreProperties())
         .thenReturn(new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).setPersistedToDisk(true).build());
     doAnswer(invocation -> {
-        return mockStorageEngine;
-      }).when(mockStorageEngineFactory).getStorageEngine(anyString(), any(), any(), any(), any(),
-            any(), any(), any(), any(), any());
+      return mockStorageEngine;
+    }).when(mockStorageEngineFactory).getStorageEngine(anyString(), any(), any(), any(), any(),
+        any(), any(), any(), any(), any());
 
     storageEngineFactories.put(STORE_NAME, mockStorageEngineFactory);
 
     // Add instrumentation to mocked storage engine, to record the number of store.restore() calls
     doAnswer(invocation -> {
-        storeRestoreCallCount++;
-        return null;
-      }).when(mockStorageEngine).restore(any());
+      storeRestoreCallCount++;
+      return null;
+    }).when(mockStorageEngine).restore(any());
 
     // Set the mocked stores' properties to be persistent
     doAnswer(invocation -> {
-        return new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).build();
-      }).when(mockStorageEngine).getStoreProperties();
+      return new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).build();
+    }).when(mockStorageEngine).getStoreProperties();
 
     // Mock and setup sysconsumers
     SystemConsumer mockSystemConsumer = mock(SystemConsumer.class);
     doAnswer(invocation -> {
-        systemConsumerStartCount++;
-        return null;
-      }).when(mockSystemConsumer).start();
+      systemConsumerStartCount++;
+      return null;
+    }).when(mockSystemConsumer).start();
     doAnswer(invocation -> {
-        systemConsumerStopCount++;
-        return null;
-      }).when(mockSystemConsumer).stop();
+      systemConsumerStopCount++;
+      return null;
+    }).when(mockSystemConsumer).stop();
 
     // Create mocked system factories
     Map<String, SystemFactory> systemFactories = new HashMap<>();
@@ -160,9 +160,9 @@ public class TestContainerStorageManager {
     // Count the number of sysConsumers created
     SystemFactory mockSystemFactory = mock(SystemFactory.class);
     doAnswer(invocation -> {
-        this.systemConsumerCreationCount++;
-        return mockSystemConsumer;
-      }).when(mockSystemFactory).getConsumer(anyString(), any(), any());
+      this.systemConsumerCreationCount++;
+      return mockSystemConsumer;
+    }).when(mockSystemFactory).getConsumer(anyString(), any(), any());
 
     systemFactories.put(SYSTEM_NAME, mockSystemFactory);
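
A hedged aside on the counter-style instrumentation above: where a test only needs to check how often a mock was called, Mockito's verify with times(n) can stand in for hand-rolled counters (illustrative only; this test also reads its counters as plain ints elsewhere, so the substitution would not be mechanical):

    verify(mockSystemFactory, times(1)).getConsumer(anyString(), any(), any());
    verify(mockSystemConsumer, times(1)).start();
    verify(mockSystemConsumer, times(1)).stop();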