Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2017/04/04 03:20:01 UTC

hadoop git commit: HADOOP-14271. Correct spelling of 'occurred' and variants. Contributed by Yeliang Cang

Repository: hadoop
Updated Branches:
  refs/heads/trunk 5faa949b7 -> 6eba79232


HADOOP-14271. Correct spelling of 'occurred' and variants. Contributed by Yeliang Cang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eba7923
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eba7923
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eba7923

Branch: refs/heads/trunk
Commit: 6eba79232f36b36e0196163adc8fe4219a6b6bf9
Parents: 5faa949
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Apr 3 20:13:14 2017 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Apr 3 20:13:14 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/util/Progressable.java    |  2 +-
 .../apache/hadoop/util/UTF8ByteArrayUtils.java  |  4 +--
 .../src/main/native/gtest/gtest-all.cc          |  2 +-
 .../src/main/native/gtest/include/gtest/gtest.h |  2 +-
 .../hadoop/test/MultithreadedTestUtil.java      |  2 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  2 +-
 .../hadoop/hdfs/DFSStripedInputStream.java      |  2 +-
 .../hadoop/fs/http/server/FSOperations.java     | 34 ++++++++++----------
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  2 +-
 .../mapreduce/lib/jobcontrol/JobControl.java    |  2 +-
 .../hadoop/fs/azure/BlockBlobAppendStream.java  |  2 +-
 .../hadoop/streaming/StreamKeyValUtil.java      |  8 ++---
 .../distributedshell/TestDistributedShell.java  | 30 ++++++++---------
 .../launcher/TestContainerLaunch.java           | 12 +++----
 .../scheduler/capacity/TestParentQueue.java     | 18 +++++------
 16 files changed, 63 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
index 495ca82..201ee5c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * to explicitly report progress to the Hadoop framework. This is especially
  * important for operations which take significant amount of time since,
  * in-lieu of the reported progress, the framework has to assume that an error
- * has occured and time-out the operation.</p>
+ * has occurred and time out the operation.</p>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable

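For context on the Javadoc fixed above: a minimal sketch of how a long-running operation reports liveness through Progressable so the framework does not assume an error has occurred. Progressable's single progress() callback is the real interface; the record loop and write() helper are illustrative only.

    import org.apache.hadoop.util.Progressable;

    public class SlowCopy {
      // Report progress once per record so the framework does not assume
      // an error has occurred and time out the operation.
      static void copyAll(Iterable<byte[]> records, Progressable progressable)
          throws java.io.IOException {
        for (byte[] record : records) {
          write(record);            // hypothetical long-running work
          progressable.progress();  // liveness signal to the framework
        }
      }

      private static void write(byte[] record) throws java.io.IOException {
        // placeholder for the actual I/O
      }
    }
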
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
index 2a804c6..069494f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
@@ -30,7 +30,7 @@ public class UTF8ByteArrayUtils {
    * @param start starting offset
    * @param end ending position
    * @param b the byte to find
-   * @return position that first byte occures otherwise -1
+   * @return position that first byte occurs, otherwise -1
    */
   public static int findByte(byte [] utf, int start, int end, byte b) {
     for(int i=start; i<end; i++) {
@@ -47,7 +47,7 @@ public class UTF8ByteArrayUtils {
    * @param start starting offset
    * @param end ending position
    * @param b the bytes to find
-   * @return position that first byte occures otherwise -1
+   * @return position that first byte occurs, otherwise -1
    */
   public static int findBytes(byte [] utf, int start, int end, byte[] b) {
     int matchEnd = end - b.length;

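A usage sketch for the two helpers whose Javadoc is corrected above; the signatures are as shown in the diff, the sample bytes are illustrative.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.util.UTF8ByteArrayUtils;

    public class FindByteDemo {
      public static void main(String[] args) {
        byte[] utf = "key\tvalue".getBytes(StandardCharsets.UTF_8);
        // Position where the first tab byte occurs, otherwise -1.
        int tab = UTF8ByteArrayUtils.findByte(utf, 0, utf.length, (byte) '\t');
        // Position where the byte sequence "value" first occurs, otherwise -1.
        int val = UTF8ByteArrayUtils.findBytes(utf, 0, utf.length,
            "value".getBytes(StandardCharsets.UTF_8));
        System.out.println(tab + " " + val);  // prints: 3 4
      }
    }
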
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc b/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc
index 4f8c08a..fb6ddbc 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc
+++ b/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc
@@ -8298,7 +8298,7 @@ FilePath FilePath::RemoveExtension(const char* extension) const {
   return *this;
 }
 
-// Returns a pointer to the last occurence of a valid path separator in
+// Returns a pointer to the last occurrence of a valid path separator in
 // the FilePath. On Windows, for example, both '/' and '\' are valid path
 // separators. Returns NULL if no path separator was found.
 const char* FilePath::FindLastPathSeparator() const {

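For readers outside the C++ tree, a rough Java analogue of what FindLastPathSeparator computes (hypothetical helper; gtest itself implements this in C++ over its FilePath type):

    public class PathSeparators {
      // Index of the last occurrence of a valid path separator ('/' or,
      // as on Windows, '\'), or -1 if no separator was found.
      static int findLastPathSeparator(String path) {
        return Math.max(path.lastIndexOf('/'), path.lastIndexOf('\\'));
      }

      public static void main(String[] args) {
        System.out.println(findLastPathSeparator("a/b\\c"));  // prints: 3
      }
    }
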
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h b/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
index c04205d..3d795bf 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
@@ -4457,7 +4457,7 @@ class GTEST_API_ FilePath {
 
   void Normalize();
 
-  // Returns a pointer to the last occurence of a valid path separator in
+  // Returns a pointer to the last occurrence of a valid path separator in
   // the FilePath. On Windows, for example, both '/' and '\' are valid path
   // separators. Returns NULL if no path separator was found.
   const char* FindLastPathSeparator() const;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
index e0bc136..b51329f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
@@ -225,7 +225,7 @@ public abstract class MultithreadedTestUtil {
 
     /**
      * User method for any code to test repeating behavior of (as threads).
-     * @throws Exception throw an exception if a failure has occured.
+     * @throws Exception throw an exception if a failure has occurred.
      */
     public abstract void doAnAction() throws Exception;
   }

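A usage sketch for the doAnAction() hook documented above. The RepeatingTestThread and TestContext names come from the same MultithreadedTestUtil class; treat the exact start/stop calls as assumptions about its API.

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
    import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;

    public class RepeatDemo {
      public static void main(String[] args) throws Exception {
        final AtomicLong actions = new AtomicLong();
        TestContext ctx = new TestContext();
        ctx.addThread(new RepeatingTestThread(ctx) {
          @Override
          public void doAnAction() throws Exception {
            // Throw here if a failure has occurred; the context
            // records the exception and fails the test.
            actions.incrementAndGet();
          }
        });
        ctx.startThreads();
        Thread.sleep(100L);
        ctx.stop();  // assumed shutdown call on TestContext
        System.out.println("actions run: " + actions.get());
      }
    }
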
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 39d0eed..d388d00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -777,7 +777,7 @@ public class DFSInputStream extends FSInputStream
           }
         } finally {
           // Check if need to report block replicas corruption either read
-          // was successful or ChecksumException occured.
+          // was successful or ChecksumException occurred.
           reportCheckSumFailure(corruptedBlocks,
               currentLocatedBlock.getLocations().length, false);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 07e6609..75ad022 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -394,7 +394,7 @@ public class DFSStripedInputStream extends DFSInputStream {
         return result;
       } finally {
         // Check if need to report block replicas corruption either read
-        // was successful or ChecksumException occured.
+        // was successful or ChecksumException occurred.
         reportCheckSumFailure(corruptedBlocks,
             currentLocatedBlock.getLocations().length, true);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 5ff42a4..3373582 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -333,7 +333,7 @@ public class FSOperations {
      *
      * @return void.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Void execute(FileSystem fs) throws IOException {
@@ -377,7 +377,7 @@ public class FSOperations {
      *
      * @return void.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Void execute(FileSystem fs) throws IOException {
@@ -418,7 +418,7 @@ public class FSOperations {
      *         wait for it to complete before proceeding with further file 
      *         updates.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public JSONObject execute(FileSystem fs) throws IOException {
@@ -452,7 +452,7 @@ public class FSOperations {
      *
      * @return a Map object (JSON friendly) with the content-summary.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
@@ -501,7 +501,7 @@ public class FSOperations {
      *
      * @return The URI of the created file.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Void execute(FileSystem fs) throws IOException {
@@ -549,7 +549,7 @@ public class FSOperations {
      * @return <code>true</code> if the delete operation was successful,
      *         <code>false</code> otherwise.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public JSONObject execute(FileSystem fs) throws IOException {
@@ -583,7 +583,7 @@ public class FSOperations {
      *
      * @return a Map object (JSON friendly) with the file checksum.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
@@ -640,7 +640,7 @@ public class FSOperations {
      *
      * @return a JSON object with the user home directory.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     @SuppressWarnings("unchecked")
@@ -765,7 +765,7 @@ public class FSOperations {
      * @return <code>true</code> if the mkdirs operation was successful,
      *         <code>false</code> otherwise.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public JSONObject execute(FileSystem fs) throws IOException {
@@ -799,7 +799,7 @@ public class FSOperations {
      *
      * @return The inputstream of the file.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public InputStream execute(FileSystem fs) throws IOException {
@@ -837,7 +837,7 @@ public class FSOperations {
      * @return <code>true</code> if the rename operation was successful,
      *         <code>false</code> otherwise.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public JSONObject execute(FileSystem fs) throws IOException {
@@ -876,7 +876,7 @@ public class FSOperations {
      *
      * @return void.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Void execute(FileSystem fs) throws IOException {
@@ -913,7 +913,7 @@ public class FSOperations {
      *
      * @return void.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Void execute(FileSystem fs) throws IOException {
@@ -1186,7 +1186,7 @@ public class FSOperations {
      * @return <code>true</code> if the replication value was set,
      *         <code>false</code> otherwise.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     @SuppressWarnings("unchecked")
@@ -1228,7 +1228,7 @@ public class FSOperations {
      *
      * @return void.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Void execute(FileSystem fs) throws IOException {
@@ -1314,7 +1314,7 @@ public class FSOperations {
      *
      * @return Map a map object (JSON friendly) with the xattr names.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
@@ -1353,7 +1353,7 @@ public class FSOperations {
      *
      * @return Map a map object (JSON friendly) with the xattrs.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {

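All of the @throws fixes above land on the same command shape: each FSOperations inner class runs one FileSystem call and maps failures to IOException. A minimal local sketch of that shape; the FsCommand interface below is a stand-in, not the real httpfs executor interface.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Stand-in mirroring the executor shape used by FSOperations.
    interface FsCommand<T> {
      T execute(FileSystem fs) throws IOException;  // thrown if an IO error occurred
    }

    class Exists implements FsCommand<Boolean> {
      private final Path path;
      Exists(Path path) { this.path = path; }
      @Override
      public Boolean execute(FileSystem fs) throws IOException {
        return fs.exists(path);  // any failure surfaces as IOException
      }
    }
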
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index c8df300..c17ef36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -822,7 +822,7 @@ class BlockPoolSlice {
     } catch (Exception e) {
       // Any exception we need to revert back to read from disk
       // Log the error and return false
-      LOG.info("Exception occured while reading the replicas cache file: "
+      LOG.info("Exception occurred while reading the replicas cache file: "
           + replicaFile.getPath(), e );
       return false;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index f6e6a59..b948fb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -1323,7 +1323,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
       fileNames = fileIoProvider.listDirectory(
           this, dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occured while compiling report: ", ioe);
+      LOG.warn("Exception occurred while compiling report: ", ioe);
       // Volume error check moved to FileIoProvider.
       // Ignore this directory and proceed.
       return report;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
index b0b7a3c..e5399b5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
@@ -276,7 +276,7 @@ public class JobControl implements Runnable {
   }
 
   synchronized private void failAllJobs(Throwable t) {
-    String message = "Unexpected System Error Occured: "+
+    String message = "Unexpected System Error Occurred: "+
     StringUtils.stringifyException(t);
     Iterator<ControlledJob> it = jobsInProgress.iterator();
     while(it.hasNext()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
index a7e286c..afb9379 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
@@ -346,7 +346,7 @@ public class BlockBlobAppendStream extends OutputStream {
 
     try {
       if (!ioThreadPool.awaitTermination(10, TimeUnit.MINUTES)) {
-        LOG.error("Time out occured while waiting for IO request to finish in append"
+        LOG.error("Time out occurred while waiting for IO request to finish in append"
             + " for blob : {}", key);
         NativeAzureFileSystemHelper.logAllLiveStackTraces();
         throw new IOException("Timed out waiting for IO requests to finish");

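The corrected log line above sits in a standard executor shutdown pattern. A generic sketch, with the 10-minute bound mirroring the code above; everything else is illustrative.

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.TimeUnit;

    public class DrainPool {
      static void drain(ExecutorService ioThreadPool) throws IOException {
        ioThreadPool.shutdown();  // stop accepting new IO requests
        try {
          if (!ioThreadPool.awaitTermination(10, TimeUnit.MINUTES)) {
            // Timeout occurred while waiting for IO requests to finish.
            throw new IOException("Timed out waiting for IO requests to finish");
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while waiting for IO requests", e);
        }
      }
    }
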
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java
index 75e05dc..fba45b1 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java
@@ -26,11 +26,11 @@ import org.apache.hadoop.util.LineReader;
 public class StreamKeyValUtil {
 
   /**
-   * Find the first occured tab in a UTF-8 encoded string
+   * Find the first occurrence of a tab in a UTF-8 encoded string
    * @param utf a byte array containing a UTF-8 encoded string
    * @param start starting offset
    * @param length no. of bytes
-   * @return position that first tab occures otherwise -1
+   * @return position that first tab occurs, otherwise -1
    */
   public static int findTab(byte [] utf, int start, int length) {
     for(int i=start; i<(start+length); i++) {
@@ -41,9 +41,9 @@ public class StreamKeyValUtil {
     return -1;      
   }
   /**
-   * Find the first occured tab in a UTF-8 encoded string
+   * Find the first occurrence of a tab in a UTF-8 encoded string
    * @param utf a byte array containing a UTF-8 encoded string
-   * @return position that first tab occures otherwise -1
+   * @return position that first tab occurs, otherwise -1
    */
   public static int findTab(byte [] utf) {
     return org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(utf, 0, 

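A usage sketch for findTab, whose Javadoc is corrected above; the signature is as shown in the diff, the sample line is illustrative.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.streaming.StreamKeyValUtil;

    public class TabSplit {
      public static void main(String[] args) {
        byte[] line = "key\tvalue".getBytes(StandardCharsets.UTF_8);
        // Position where the first tab occurs, otherwise -1.
        int tab = StreamKeyValUtil.findTab(line);
        String key = tab < 0
            ? new String(line, StandardCharsets.UTF_8)
            : new String(line, 0, tab, StandardCharsets.UTF_8);
        System.out.println(tab + " -> " + key);  // prints: 3 -> key
      }
    }
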
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 300ea67..ef21c87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -544,17 +544,17 @@ public class TestDistributedShell {
       Assert.assertEquals(
           "Container created event needs to be published atleast once",
           1,
-          getNumOfStringOccurences(containerEntityFile,
+          getNumOfStringOccurrences(containerEntityFile,
               ContainerMetricsConstants.CREATED_EVENT_TYPE));
 
       // to avoid race condition of testcase, atleast check 4 times with sleep
       // of 500ms
-      long numOfContainerFinishedOccurences = 0;
+      long numOfContainerFinishedOccurrences = 0;
       for (int i = 0; i < 4; i++) {
-        numOfContainerFinishedOccurences =
-            getNumOfStringOccurences(containerEntityFile,
+        numOfContainerFinishedOccurrences =
+            getNumOfStringOccurrences(containerEntityFile,
                 ContainerMetricsConstants.FINISHED_EVENT_TYPE);
-        if (numOfContainerFinishedOccurences > 0) {
+        if (numOfContainerFinishedOccurrences > 0) {
           break;
         } else {
           Thread.sleep(500L);
@@ -563,7 +563,7 @@ public class TestDistributedShell {
       Assert.assertEquals(
           "Container finished event needs to be published atleast once",
           1,
-          numOfContainerFinishedOccurences);
+          numOfContainerFinishedOccurrences);
 
       // Verify RM posting Application life cycle Events are getting published
       String appMetricsTimestampFileName =
@@ -576,17 +576,17 @@ public class TestDistributedShell {
       Assert.assertEquals(
           "Application created event should be published atleast once",
           1,
-          getNumOfStringOccurences(appEntityFile,
+          getNumOfStringOccurrences(appEntityFile,
               ApplicationMetricsConstants.CREATED_EVENT_TYPE));
 
       // to avoid race condition of testcase, atleast check 4 times with sleep
       // of 500ms
-      long numOfStringOccurences = 0;
+      long numOfStringOccurrences = 0;
       for (int i = 0; i < 4; i++) {
-        numOfStringOccurences =
-            getNumOfStringOccurences(appEntityFile,
+        numOfStringOccurrences =
+            getNumOfStringOccurrences(appEntityFile,
                 ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
-        if (numOfStringOccurences > 0) {
+        if (numOfStringOccurrences > 0) {
           break;
         } else {
           Thread.sleep(500L);
@@ -595,7 +595,7 @@ public class TestDistributedShell {
       Assert.assertEquals(
           "Application finished event should be published atleast once",
           1,
-          numOfStringOccurences);
+          numOfStringOccurrences);
 
       // Verify RM posting AppAttempt life cycle Events are getting published
       String appAttemptMetricsTimestampFileName =
@@ -609,13 +609,13 @@ public class TestDistributedShell {
       Assert.assertEquals(
           "AppAttempt register event should be published atleast once",
           1,
-          getNumOfStringOccurences(appAttemptEntityFile,
+          getNumOfStringOccurrences(appAttemptEntityFile,
               AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE));
 
       Assert.assertEquals(
           "AppAttempt finished event should be published atleast once",
           1,
-          getNumOfStringOccurences(appAttemptEntityFile,
+          getNumOfStringOccurrences(appAttemptEntityFile,
               AppAttemptMetricsConstants.FINISHED_EVENT_TYPE));
     } finally {
       FileUtils.deleteDirectory(tmpRootFolder.getParentFile());
@@ -636,7 +636,7 @@ public class TestDistributedShell {
     return entityFile;
   }
 
-  private long getNumOfStringOccurences(File entityFile, String searchString)
+  private long getNumOfStringOccurrences(File entityFile, String searchString)
       throws IOException {
     BufferedReader reader = null;
     String strLine;

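The renamed helper above counts how often a marker string appears in an entity file. A standalone sketch of that counting logic; the per-line contains() check is an assumption about the original implementation.

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileReader;
    import java.io.IOException;

    public class OccurrenceCount {
      static long getNumOfStringOccurrences(File entityFile, String searchString)
          throws IOException {
        long count = 0;
        try (BufferedReader reader = new BufferedReader(new FileReader(entityFile))) {
          String strLine;
          while ((strLine = reader.readLine()) != null) {
            if (strLine.contains(searchString)) {
              count++;  // count lines on which the marker occurs
            }
          }
        }
        return count;
      }
    }
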
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 23b99d9..8dcf4be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -641,8 +641,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
     ContainerLaunch launch = new ContainerLaunch(context, conf, dispatcher,
         exec, app, container, dirsHandler, containerManager);
     launch.call();
-    Assert.assertTrue("ContainerExitEvent should have occured",
-        eventHandler.isContainerExitEventOccured());
+    Assert.assertTrue("ContainerExitEvent should have occurred",
+        eventHandler.isContainerExitEventOccurred());
   }
 
   private static class ContainerExitHandler implements EventHandler<Event> {
@@ -652,15 +652,15 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       this.testForMultiFile = testForMultiFile;
     }
 
-    boolean containerExitEventOccured = false;
+    boolean containerExitEventOccurred = false;
 
-    public boolean isContainerExitEventOccured() {
-      return containerExitEventOccured;
+    public boolean isContainerExitEventOccurred() {
+      return containerExitEventOccurred;
     }
 
     public void handle(Event event) {
       if (event instanceof ContainerExitEvent) {
-        containerExitEventOccured = true;
+        containerExitEventOccurred = true;
         ContainerExitEvent exitEvent = (ContainerExitEvent) event;
         Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
             exitEvent.getType());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
index c4b7a0d..cdbbc51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -343,43 +343,43 @@ public class TestParentQueue {
     csConf.setCapacity(Q_B, 70.5F);
 
     Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
-    boolean exceptionOccured = false;
+    boolean exceptionOccurred = false;
     try {
       CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null,
           CapacitySchedulerConfiguration.ROOT, queues, queues,
           TestUtils.spyHook);
     } catch (IllegalArgumentException ie) {
-      exceptionOccured = true;
+      exceptionOccurred = true;
     }
-    if (!exceptionOccured) {
+    if (!exceptionOccurred) {
       Assert.fail("Capacity is more then 100% so should be failed.");
     }
     csConf.setCapacity(Q_A, 30);
     csConf.setCapacity(Q_B, 70);
-    exceptionOccured = false;
+    exceptionOccurred = false;
     queues.clear();
     try {
       CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null,
           CapacitySchedulerConfiguration.ROOT, queues, queues,
           TestUtils.spyHook);
     } catch (IllegalArgumentException ie) {
-      exceptionOccured = true;
+      exceptionOccurred = true;
     }
-    if (exceptionOccured) {
+    if (exceptionOccurred) {
       Assert.fail("Capacity is 100% so should not be failed.");
     }
     csConf.setCapacity(Q_A, 30);
     csConf.setCapacity(Q_B, 70.005F);
-    exceptionOccured = false;
+    exceptionOccurred = false;
     queues.clear();
     try {
       CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null,
           CapacitySchedulerConfiguration.ROOT, queues, queues,
           TestUtils.spyHook);
     } catch (IllegalArgumentException ie) {
-      exceptionOccured = true;
+      exceptionOccurred = true;
     }
-    if (exceptionOccured) {
+    if (exceptionOccurred) {
       Assert
           .fail("Capacity is under PRECISION which is .05% so should not be failed.");
     }

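The renamed exceptionOccurred flag above is a common assert-throws idiom. A minimal standalone sketch; parseConfig and its IllegalArgumentException are illustrative stand-ins for parseQueue.

    import org.junit.Assert;

    public class AssertThrowsDemo {
      static void parseConfig(float capacityPercent) {
        if (capacityPercent > 100f) {
          throw new IllegalArgumentException("capacity over 100%");
        }
      }

      public static void main(String[] args) {
        boolean exceptionOccurred = false;
        try {
          parseConfig(100.5f);
        } catch (IllegalArgumentException ie) {
          exceptionOccurred = true;
        }
        if (!exceptionOccurred) {
          Assert.fail("Capacity is more than 100% so parsing should fail.");
        }
      }
    }
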
