Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2013/06/21 08:37:39 UTC

svn commit: r1495297 [10/46] - in /hadoop/common/branches/branch-1-win: ./ bin/ conf/ ivy/ lib/jdiff/ src/c++/libhdfs/docs/ src/c++/libhdfs/tests/conf/ src/contrib/capacity-scheduler/ivy/ src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred...

Modified: hadoop/common/branches/branch-1-win/src/c++/libhdfs/docs/libhdfs_footer.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/c%2B%2B/libhdfs/docs/libhdfs_footer.html?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/c++/libhdfs/docs/libhdfs_footer.html (original)
+++ hadoop/common/branches/branch-1-win/src/c++/libhdfs/docs/libhdfs_footer.html Fri Jun 21 06:37:27 2013
@@ -1,4 +1,19 @@
 <!--
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+<!--
   Custom footer for libhdfs documentation.
   Also useful for switching off default Doxygen footer and hence the timestamp.
 -->

Modified: hadoop/common/branches/branch-1-win/src/c++/libhdfs/tests/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/c%2B%2B/libhdfs/tests/conf/hdfs-site.xml?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/c++/libhdfs/tests/conf/hdfs-site.xml (original)
+++ hadoop/common/branches/branch-1-win/src/c++/libhdfs/tests/conf/hdfs-site.xml Fri Jun 21 06:37:27 2013
@@ -15,13 +15,6 @@
 </property>
 
 <property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>Allow appends to files.
-  </description>
-</property>
-
-<property>
   <name>dfs.datanode.address</name>
   <value>0.0.0.0:50012</value>
   <description>

Modified: hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by streaming.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerQueue.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerQueue.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerQueue.java Fri Jun 21 06:37:27 2013
@@ -1157,7 +1157,7 @@ class CapacitySchedulerQueue {
     
     int queueSlotsOccupied = getNumSlotsOccupied(taskType);
     int currentCapacity;
-    if (queueSlotsOccupied < queueCapacity) {
+    if (queueSlotsOccupied + numSlotsRequested <= queueCapacity) {
       currentCapacity = queueCapacity;
     }
     else {
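
The hunk above tightens the test that selects currentCapacity: instead of asking whether any slot is still free, it asks whether the whole multi-slot request fits within the configured capacity, so a high-memory task is no longer measured against plain queue capacity when it does not actually fit. A minimal sketch of the two predicates, with illustrative names rather than the scheduler's actual fields:

public class CapacityCheckSketch {
  // Old test: passes whenever any headroom exists, even if the request
  // needs more slots than remain.
  static boolean oldCheck(int occupied, int requested, int capacity) {
    return occupied < capacity;
  }

  // New test: the entire multi-slot request must fit.
  static boolean newCheck(int occupied, int requested, int capacity) {
    return occupied + requested <= capacity;
  }

  public static void main(String[] args) {
    // 9 of 10 slots occupied; a high-memory task asks for 4 slots.
    System.out.println(oldCheck(9, 4, 10));  // true
    System.out.println(newCheck(9, 4, 10));  // false
  }
}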

Modified: hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java Fri Jun 21 06:37:27 2013
@@ -154,7 +154,8 @@ class CapacityTaskScheduler extends Task
     protected TaskType type = null;
 
     abstract TaskLookupResult obtainNewTask(TaskTrackerStatus taskTracker, 
-        JobInProgress job, boolean assignOffSwitch) throws IOException;
+        JobInProgress job, boolean assignOffSwitch,
+        ClusterStatus clusterStatus) throws IOException;
 
     int getSlotsOccupied(JobInProgress job) {
       return (getNumReservedTaskTrackers(job) + getRunningTasks(job)) * 
@@ -293,7 +294,8 @@ class CapacityTaskScheduler extends Task
     private TaskLookupResult getTaskFromQueue(TaskTracker taskTracker,
                                               int availableSlots,
                                               CapacitySchedulerQueue queue,
-                                              boolean assignOffSwitch)
+                                              boolean assignOffSwitch,
+                                              ClusterStatus clusterStatus)
     throws IOException {
       TaskTrackerStatus taskTrackerStatus = taskTracker.getStatus();
       // we only look at jobs in the running queues, as these are the ones
@@ -320,7 +322,8 @@ class CapacityTaskScheduler extends Task
                                                               availableSlots)) {
           // We found a suitable job. Get task from it.
           TaskLookupResult tlr = 
-            obtainNewTask(taskTrackerStatus, j, assignOffSwitch);
+            obtainNewTask(taskTrackerStatus, j, assignOffSwitch,
+                          clusterStatus);
           //if there is a task return it immediately.
           if (tlr.getLookUpStatus() == 
                   TaskLookupResult.LookUpStatus.LOCAL_TASK_FOUND || 
@@ -344,7 +347,7 @@ class CapacityTaskScheduler extends Task
           // starved
           if ((getPendingTasks(j) != 0 &&
               !hasSufficientReservedTaskTrackers(j)) &&
-                (taskTracker.getAvailableSlots(type) !=
+                !(j.getNumSlotsPerTask(type) >
                  getTTMaxSlotsForType(taskTrackerStatus, type))) {
             // Reserve all available slots on this tasktracker
             LOG.info(j.getJobID() + ": Reserving "
@@ -379,6 +382,11 @@ class CapacityTaskScheduler extends Task
 
       printQueues();
 
+      //MAPREDUCE-1684: getClusterStatus appears to be expensive. Cache it
+      //here for reuse during the scheduling pass
+      ClusterStatus clusterStatus =
+        scheduler.taskTrackerManager.getClusterStatus();
+
       // Check if this tasktracker has been reserved for a job...
       JobInProgress job = taskTracker.getJobForFallowSlot(type);
       if (job != null) {
@@ -397,7 +405,7 @@ class CapacityTaskScheduler extends Task
             // Don't care about locality!
             job.overrideSchedulingOpportunities();
           }
-          return obtainNewTask(taskTrackerStatus, job, true);
+          return obtainNewTask(taskTrackerStatus, job, true, clusterStatus);
         } else {
           // Re-reserve the current tasktracker
           taskTracker.reserveSlots(type, job, availableSlots);
@@ -420,7 +428,8 @@ class CapacityTaskScheduler extends Task
         }
         
         TaskLookupResult tlr = 
-          getTaskFromQueue(taskTracker, availableSlots, queue, assignOffSwitch);
+          getTaskFromQueue(taskTracker, availableSlots, queue, assignOffSwitch,
+                          clusterStatus);
         TaskLookupResult.LookUpStatus lookUpStatus = tlr.getLookUpStatus();
 
         if (lookUpStatus == TaskLookupResult.LookUpStatus.NO_TASK_FOUND) {
@@ -501,10 +510,10 @@ class CapacityTaskScheduler extends Task
 
     @Override
     TaskLookupResult obtainNewTask(TaskTrackerStatus taskTracker, 
-                                   JobInProgress job, boolean assignOffSwitch) 
+                                   JobInProgress job, boolean assignOffSwitch,
+                                   ClusterStatus clusterStatus)
     throws IOException {
-      ClusterStatus clusterStatus = 
-        scheduler.taskTrackerManager.getClusterStatus();
+
       int numTaskTrackers = clusterStatus.getTaskTrackers();
       int numUniqueHosts = scheduler.taskTrackerManager.getNumberOfUniqueHosts();
       
@@ -581,10 +590,9 @@ class CapacityTaskScheduler extends Task
 
     @Override
     TaskLookupResult obtainNewTask(TaskTrackerStatus taskTracker, 
-                                   JobInProgress job, boolean unused) 
+                                   JobInProgress job, boolean unused,
+                                   ClusterStatus clusterStatus)
     throws IOException {
-      ClusterStatus clusterStatus = 
-        scheduler.taskTrackerManager.getClusterStatus();
       int numTaskTrackers = clusterStatus.getTaskTrackers();
       Task t = job.obtainNewReduceTask(taskTracker, numTaskTrackers, 
           scheduler.taskTrackerManager.getNumberOfUniqueHosts());
@@ -1042,10 +1050,20 @@ class CapacityTaskScheduler extends Task
      */ 
     updateAllQueues(mapClusterCapacity, reduceClusterCapacity);
     
-    // schedule tasks
+    /*
+     * Schedule tasks
+     */
+    
     List<Task> result = new ArrayList<Task>();
-    addMapTasks(taskTracker, result, maxMapSlots, currentMapSlots);
-    addReduceTask(taskTracker, result, maxReduceSlots, currentReduceSlots);
+    
+    // Check for JT safe-mode
+    if (taskTrackerManager.isInSafeMode()) {
+      LOG.info("JobTracker is in safe-mode, not scheduling any tasks.");
+    } else {
+      addMapTasks(taskTracker, result, maxMapSlots, currentMapSlots);
+      addReduceTask(taskTracker, result, maxReduceSlots, currentReduceSlots);
+    }
+    
     return result;
   }
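
Taken together, the hunks above implement MAPREDUCE-1684: getClusterStatus() is now called once per assignTasks() pass and the snapshot is threaded through getTaskFromQueue() and obtainNewTask() as a parameter, instead of being re-fetched inside each obtainNewTask() call. A sketch of the fetch-once, pass-down pattern, using stand-in types rather than the Hadoop classes:

import java.util.Arrays;
import java.util.List;

public class FetchOncePassDownSketch {
  // Stand-in for ClusterStatus; pretend construction is expensive.
  static class ClusterView {
    int taskTrackers() { return 10; }
  }

  // Stand-in for taskTrackerManager.getClusterStatus().
  ClusterView fetchClusterView() {
    return new ClusterView();
  }

  // The caller fetches the snapshot once and threads it through, so a
  // pass over N jobs costs one fetch instead of N.
  void schedulePass(List<String> jobs) {
    ClusterView view = fetchClusterView();  // cached for the whole pass
    for (String job : jobs) {
      obtainNewTask(job, view);             // reuse, don't re-fetch
    }
  }

  void obtainNewTask(String job, ClusterView view) {
    System.out.println(job + " sees " + view.taskTrackers() + " trackers");
  }

  public static void main(String[] args) {
    new FetchOncePassDownSketch().schedulePass(Arrays.asList("job1", "job2"));
  }
}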
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java Fri Jun 21 06:37:27 2013
@@ -688,6 +688,12 @@ public class TestCapacityScheduler exten
     public QueueManager getQueueManager() {
       return qm;
     }
+
+    @Override
+    public boolean isInSafeMode() {
+      // TODO Auto-generated method stub
+      return false;
+    }
   }
   
   // represents a fake queue configuration info
@@ -696,12 +702,18 @@ public class TestCapacityScheduler exten
     float capacity;
     boolean supportsPrio;
     int ulMin;
+    Float ulFactor;
 
     public FakeQueueInfo(String queueName, float capacity, boolean supportsPrio, int ulMin) {
+      this(queueName, capacity, supportsPrio, ulMin, null);
+    }
+
+    public FakeQueueInfo(String queueName, float capacity, boolean supportsPrio, int ulMin, Float ulFactor) {
       this.queueName = queueName;
       this.capacity = capacity;
       this.supportsPrio = supportsPrio;
       this.ulMin = ulMin;
+      this.ulFactor = ulFactor;
     }
   }
   
@@ -727,18 +739,29 @@ public class TestCapacityScheduler exten
     /*public synchronized String getFirstQueue() {
       return firstQueue;
     }*/
-    
+
+    @Override
     public float getCapacity(String queue) {
       if(queueMap.get(queue).capacity == -1) {
         return super.getCapacity(queue);
       }
       return queueMap.get(queue).capacity;
     }
-    
+
+    @Override
     public int getMinimumUserLimitPercent(String queue) {
       return queueMap.get(queue).ulMin;
     }
-    
+
+    @Override
+    public float getUserLimitFactor(String queue) {
+      if(queueMap.get(queue).ulFactor != null) {
+        return queueMap.get(queue).ulFactor;
+      }
+      return super.getUserLimitFactor(queue);
+    }
+
+    @Override
     public boolean isPrioritySupported(String queue) {
       return queueMap.get(queue).supportsPrio;
     }
@@ -1327,6 +1350,88 @@ public class TestCapacityScheduler exten
   }
 
   /**
+   * Test checks that a high memory job is able to consume more slots than
+   * the queue's configured capacity, but not more than the max capacity
+   * (provided user-limit-factor is set up properly).
+   */
+  public void testHighMemoryCanConsumeMaxCapacity() throws IOException {
+    //cluster with 20 map and 20 reduce slots
+    final int NUM_MAP_SLOTS = 4;
+    final int NUM_REDUCE_SLOTS = 4;
+    final int NUM_TASK_TRACKERS = 5;
+
+    taskTrackerManager =
+      new FakeTaskTrackerManager(NUM_TASK_TRACKERS, NUM_MAP_SLOTS, NUM_REDUCE_SLOTS);
+
+    //Q1 capacity is 4*5*0.5=10 map and 4*5*0.5=10 reduce slots
+    final String Q1 = "q1";
+    final float Q1_CAP = 50.f;
+    final int Q1_ULMIN = 50;
+    final float Q1_ULFACTOR = 2;
+
+    //Q2 exists only to bring the total configured capacity up to 100%
+    final String Q2 = "q2";
+    final float Q2_CAP = 50.f;
+    final int Q2_ULMIN = 50;
+
+    taskTrackerManager.addQueues(new String[] { Q1, Q2 });
+    ArrayList<FakeQueueInfo> queues = new ArrayList<FakeQueueInfo>();
+
+
+    queues.add(new FakeQueueInfo(Q1, Q1_CAP, true, Q1_ULMIN, Q1_ULFACTOR));
+    queues.add(new FakeQueueInfo(Q2, Q2_CAP, true, Q2_ULMIN));
+    resConf.setFakeQueues(queues);
+
+    //q1 can go up to 4*5*0.8=16 map and 4*5*0.8=16 reduce slots
+    resConf.setMaxCapacity(Q1, 80.0f);
+
+    //configure and start scheduler
+    scheduler.setTaskTrackerManager(taskTrackerManager);
+    scheduler.getConf().setLong(
+        JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY,
+        4 * 1024);
+    scheduler.getConf().setLong(
+        JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 1 * 1024);
+    scheduler.getConf().setLong(
+        JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY,
+        4 * 1024);
+    scheduler.getConf().setLong(
+        JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1 * 1024);
+    scheduler.setResourceManagerConf(resConf);
+    scheduler.start();
+
+    //submit high mem job with 5 mappers and 1 reducer with 4 slots each
+    JobConf jConf = new JobConf(conf);
+    jConf.setMemoryForMapTask(4 * 1024);
+    jConf.setMemoryForReduceTask(4 * 1024);
+    jConf.setNumMapTasks(5);
+    jConf.setNumReduceTasks(1);
+    jConf.setQueueName(Q1);
+    jConf.setUser("u1");
+    FakeJobInProgress job1 = submitJobAndInit(JobStatus.PREP, jConf);
+
+    //tt1-tt4 are full (max capacity of q1 is 16 slots)
+    List<Task> tasks = checkAssignments("tt1",
+        new String[] {
+        "attempt_test_0001_m_000001_0 on tt1",
+        "attempt_test_0001_r_000001_0 on tt1"});
+    List<Task> tasks2 = checkAssignments("tt2",
+        new String[] {"attempt_test_0001_m_000002_0 on tt2"});
+    List<Task> tasks3 = checkAssignments("tt3",
+            new String[] {"attempt_test_0001_m_000003_0 on tt3"});
+    List<Task> tasks4 = checkAssignments("tt4",
+            new String[] {"attempt_test_0001_m_000004_0 on tt4"});
+
+    assertTrue("Shouldn't assign more slots (reached max capacity)",
+        scheduler.assignTasks(tracker("tt5")).isEmpty());
+
+    checkOccupiedSlots(Q1, TaskType.MAP, 1, 16, 160.0f, 1, 0);
+    checkOccupiedSlots(Q1, TaskType.REDUCE, 1, 4, 40.0f, 0, 2);
+
+    // Don't check the 5th map task's completion; that's outside this test's scope.
+  }
+
+  /**
    * Creates a queue with max capacity  of 50%
    * submit 1 job in the queue which is high ram(2 slots) . As 2 slots are
    * given to high ram job and are reserved , no other tasks are accepted .

Modified: hadoop/common/branches/branch-1-win/src/contrib/data_join/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/data_join/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/data_join/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/data_join/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by streaming.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/build.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/build.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/build.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/build.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 output.. = bin/
 bin.includes = META-INF/,\
                plugin.xml,\

Modified: hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/eclipse-plugin/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by streaming.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/failmon/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/failmon/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/failmon/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/failmon/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by streaming.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy.xml?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy.xml (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy.xml Fri Jun 21 06:37:27 2013
@@ -82,5 +82,10 @@
       name="commons-lang"
       rev="${commons-lang.version}"
       conf="common->master"/>
+    <dependency org="org.mockito" 
+      name="mockito-all" 
+      rev="${mockito-all.version}" 
+      conf="common->default">
+    </dependency>
   </dependencies>
 </ivy-module>

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by streaming.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java Fri Jun 21 06:37:27 2013
@@ -28,8 +28,8 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.serve
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.Updater;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 
@@ -275,12 +276,19 @@ public class FairScheduler extends TaskS
 
   private class JobInitializer {
     private final int DEFAULT_NUM_THREADS = 1;
-    private ExecutorService threadPool;
+    private ThreadPoolExecutor threadPool;
     private TaskTrackerManager ttm;
     public JobInitializer(Configuration conf, TaskTrackerManager ttm) {
       int numThreads = conf.getInt("mapred.jobinit.threads",
           DEFAULT_NUM_THREADS);
-      threadPool = Executors.newFixedThreadPool(numThreads);
+      threadPool = new ThreadPoolExecutor(numThreads, numThreads, 0L,
+					TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
+      // Pre-start all threads to ensure they are created as the JobTracker
+      // rather than as the user who submitted the job; otherwise job
+      // initialization fails when security is enabled
+      if (threadPool.prestartAllCoreThreads() != numThreads) {
+          throw new RuntimeException("Failed to pre-start threads in JobInitializer");
+      }
       this.ttm = ttm;
     }
     public void initJob(JobInfo jobInfo, JobInProgress job) {
@@ -320,8 +328,17 @@ public class FairScheduler extends TaskS
     public void jobAdded(JobInProgress job) {
       synchronized (FairScheduler.this) {
         eventLog.log("JOB_ADDED", job.getJobID());
-        JobInfo info = new JobInfo(new JobSchedulable(FairScheduler.this, job, TaskType.MAP),
-            new JobSchedulable(FairScheduler.this, job, TaskType.REDUCE));
+        JobSchedulable mapSched = ReflectionUtils.newInstance(
+            conf.getClass("mapred.jobtracker.jobSchedulable", JobSchedulable.class,
+                JobSchedulable.class), conf);
+        mapSched.init(FairScheduler.this, job, TaskType.MAP);
+
+        JobSchedulable redSched = ReflectionUtils.newInstance(
+            conf.getClass("mapred.jobtracker.jobSchedulable", JobSchedulable.class,
+                JobSchedulable.class), conf);
+        redSched.init(FairScheduler.this, job, TaskType.REDUCE);
+
+        JobInfo info = new JobInfo(mapSched, redSched);
         infos.put(job, info);
         poolMgr.addJob(job); // Also adds job into the right PoolScheduable
         update();
@@ -412,7 +429,13 @@ public class FairScheduler extends TaskS
 
     // Update time waited for local maps for jobs skipped on last heartbeat
     updateLocalityWaitTimes(currentTime);
-    
+
+    // Check for JT safe-mode
+    if (taskTrackerManager.isInSafeMode()) {
+      LOG.info("JobTracker is in safe-mode, not scheduling any tasks.");
+      return null;
+    } 
+
     TaskTrackerStatus tts = tracker.getStatus();
 
     int mapsAssigned = 0; // loop counter for map in the below while loop
@@ -538,15 +561,16 @@ public class FairScheduler extends TaskS
    * The scheduler may launch fewer than this many tasks if the LoadManager
    * says not to launch more, but it will never launch more than this number.
    */
-  private int maxTasksToAssign(TaskType type, TaskTrackerStatus tts) {
+  protected int maxTasksToAssign(TaskType type, TaskTrackerStatus tts) {
     if (!assignMultiple)
       return 1;
     int cap = (type == TaskType.MAP) ? mapAssignCap : reduceAssignCap;
+    int availableSlots = (type == TaskType.MAP) ?
+        tts.getAvailableMapSlots(): tts.getAvailableReduceSlots();
     if (cap == -1) // Infinite cap; use the TaskTracker's slot count
-      return (type == TaskType.MAP) ?
-          tts.getAvailableMapSlots(): tts.getAvailableReduceSlots();
+      return availableSlots;
     else
-      return cap;
+      return Math.min(cap, availableSlots);
   }
 
   /**
@@ -571,8 +595,10 @@ public class FairScheduler extends TaskS
   private void updateLastMapLocalityLevel(JobInProgress job,
       Task mapTaskLaunched, TaskTrackerStatus tracker) {
     JobInfo info = infos.get(job);
+    boolean isNodeGroupAware = conf.getBoolean(
+        "net.topology.nodegroup.aware", false);
     LocalityLevel localityLevel = LocalityLevel.fromTask(
-        job, mapTaskLaunched, tracker);
+        job, mapTaskLaunched, tracker, isNodeGroupAware);
     info.lastMapLocalityLevel = localityLevel;
     info.timeWaitedForLocalMap = 0;
     eventLog.log("ASSIGNED_LOC_LEVEL", job.getJobID(), localityLevel);
@@ -1020,6 +1046,9 @@ public class FairScheduler extends TaskS
 
   @Override
   public synchronized Collection<JobInProgress> getJobs(String queueName) {
+    if (queueName == null) {
+      return null;
+    }
     Pool myJobPool = poolMgr.getPool(queueName);
     return myJobPool.getJobs();
   }
@@ -1072,16 +1101,8 @@ public class FairScheduler extends TaskS
           else return p1.getName().compareTo(p2.getName());
         }});
       for (Pool pool: pools) {
-        int runningMaps = 0;
-        int runningReduces = 0;
-        for (JobInProgress job: pool.getJobs()) {
-          JobInfo info = infos.get(job);
-          if (info != null) {
-            // TODO: Fix
-            //runningMaps += info.runningMaps;
-            //runningReduces += info.runningReduces;
-          }
-        }
+        int runningMaps = pool.getMapSchedulable().getRunningTasks();
+        int runningReduces = pool.getReduceSchedulable().getRunningTasks();
         String name = pool.getName();
         eventLog.log("POOL",
             name, poolMgr.getPoolWeight(name), pool.getJobs().size(),
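
Among the changes above, JobInitializer now builds its pool as a ThreadPoolExecutor and pre-starts all core threads, so the workers are created in the JobTracker's own security context rather than lazily inside the first user's job submission. A small sketch of that construction; the pool size here is illustrative (the patch reads it from mapred.jobinit.threads):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PrestartSketch {
  public static void main(String[] args) {
    int numThreads = 4;  // illustrative; the patch uses mapred.jobinit.threads
    ThreadPoolExecutor pool = new ThreadPoolExecutor(numThreads, numThreads,
        0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    // Create the worker threads now, in the caller's security context,
    // instead of lazily when the first task is submitted.
    if (pool.prestartAllCoreThreads() != numThreads) {
      throw new RuntimeException("Failed to pre-start threads");
    }
    pool.execute(new Runnable() {
      public void run() {
        System.out.println("runs on " + Thread.currentThread().getName());
      }
    });
    pool.shutdown();
  }
}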

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerEventLog.java Fri Jun 21 06:37:27 2013
@@ -76,12 +76,11 @@ class FairSchedulerEventLog {
       logDir = conf.get("mapred.fairscheduler.eventlog.location",
           new File(System.getProperty("hadoop.log.dir")).getAbsolutePath()
           + File.separator + "fairscheduler");
-      Path logDirPath = new Path(logDir);
-      FileSystem fs = logDirPath.getFileSystem(conf);
-      if (!fs.exists(logDirPath)) {
-        if (!fs.mkdirs(logDirPath)) {
+      File logDirFile = new File(logDir);
+      if (!logDirFile.exists()) {
+        if (!logDirFile.mkdirs()) {
           throw new IOException(
-              "Mkdirs failed to create " + logDirPath.toString());
+              "Mkdirs failed to create " + logDirFile.toString());
         }
       }
       String username = System.getProperty("user.name");
@@ -125,6 +124,10 @@ class FairSchedulerEventLog {
     }
   }
   
+  String getLogFile() {
+    return logFile;
+  }
+  
   /**
    * Flush and close the log.
    */

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulable.java Fri Jun 21 06:37:27 2013
@@ -25,9 +25,9 @@ import org.apache.hadoop.mapred.FairSche
 import org.apache.hadoop.mapreduce.TaskType;
 
 public class JobSchedulable extends Schedulable {
-  private FairScheduler scheduler;
-  private JobInProgress job;
-  private TaskType taskType;
+  protected FairScheduler scheduler;
+  protected JobInProgress job;
+  protected TaskType taskType;
   private int demand = 0;
 
   public JobSchedulable(FairScheduler scheduler, JobInProgress job, 
@@ -38,6 +38,18 @@ public class JobSchedulable extends Sche
     
     initMetrics();
   }
+
+  public JobSchedulable() {
+  }
+
+  public void init(FairScheduler scheduler, JobInProgress job,
+      TaskType taskType) {
+    this.scheduler = scheduler;
+    this.job = job;
+    this.taskType = taskType;
+
+    initMetrics();
+  }
   
   @Override
   public TaskType getTaskType() {
@@ -87,7 +99,7 @@ public class JobSchedulable extends Sche
     }
   }
 
-  private boolean isRunnable() {
+  protected boolean isRunnable() {
     JobInfo info = scheduler.getJobInfo(job);
     int runState = job.getStatus().getRunState();
     return (info != null && info.runnable && runState == JobStatus.RUNNING);

Added: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulableWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulableWithNodeGroup.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulableWithNodeGroup.java (added)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/JobSchedulableWithNodeGroup.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,62 @@
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.hadoop.mapreduce.TaskType;
+
+public class JobSchedulableWithNodeGroup extends JobSchedulable {
+
+  public JobSchedulableWithNodeGroup(FairScheduler scheduler,
+      JobInProgress job, TaskType taskType) {
+    super(scheduler, job, taskType);
+  }
+
+  public JobSchedulableWithNodeGroup() {
+  }
+
+  @Override
+  public Task assignTask(TaskTrackerStatus tts, long currentTime,
+      Collection<JobInProgress> visited) throws IOException {
+    if (isRunnable()) {
+      visited.add(job);
+      TaskTrackerManager ttm = scheduler.taskTrackerManager;
+      ClusterStatus clusterStatus = ttm.getClusterStatus();
+      int numTaskTrackers = clusterStatus.getTaskTrackers();
+
+      // check with the load manager whether it is safe to 
+      // launch this task on this taskTracker.
+      LoadManager loadMgr = scheduler.getLoadManager();
+      if (!loadMgr.canLaunchTask(tts, job, taskType)) {
+        return null;
+      }
+      if (taskType == TaskType.MAP) {
+        LocalityLevel localityLevel = scheduler.getAllowedLocalityLevel(
+            job, currentTime);
+        scheduler.getEventLog().log(
+            "ALLOWED_LOC_LEVEL", job.getJobID(), localityLevel);
+        switch (localityLevel) {
+          case NODE:
+            return job.obtainNewNodeLocalMapTask(tts, numTaskTrackers,
+                ttm.getNumberOfUniqueHosts());
+          case NODEGROUP:
+            // locality level for nodegroup is 2
+            return job.obtainNewMapTaskCommon(tts, numTaskTrackers, 
+                ttm.getNumberOfUniqueHosts(), 2);
+          case RACK:
+            return job.obtainNewNodeOrRackLocalMapTask(tts, numTaskTrackers,
+                ttm.getNumberOfUniqueHosts());
+          default:
+            return job.obtainNewMapTask(tts, numTaskTrackers,
+                ttm.getNumberOfUniqueHosts());
+        }
+      } else {
+        return job.obtainNewReduceTask(tts, numTaskTrackers,
+            ttm.getNumberOfUniqueHosts());
+      }
+    } else {
+      return null;
+    }
+  }
+
+}
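
With the no-arg constructor and init(...) hook added to JobSchedulable, FairScheduler.jobAdded() now instantiates the schedulable class reflectively, so this nodegroup-aware variant can be swapped in through the mapred.jobtracker.jobSchedulable property. A hedged sketch of the selection, mirroring the ReflectionUtils call in the FairScheduler diff (assumed to compile alongside these classes in org.apache.hadoop.mapred):

package org.apache.hadoop.mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class PluggableSchedulableSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Opt in to the nodegroup-aware schedulable added by this commit.
    conf.setClass("mapred.jobtracker.jobSchedulable",
        JobSchedulableWithNodeGroup.class, JobSchedulable.class);

    // Same lookup FairScheduler.jobAdded() performs:
    JobSchedulable sched = ReflectionUtils.newInstance(
        conf.getClass("mapred.jobtracker.jobSchedulable",
            JobSchedulable.class, JobSchedulable.class), conf);
    // sched.init(scheduler, job, TaskType.MAP) would follow, as in jobAdded().
    System.out.println("Instantiated " + sched.getClass().getName());
  }
}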

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LocalityLevel.java Fri Jun 21 06:37:27 2013
@@ -23,13 +23,14 @@ package org.apache.hadoop.mapred;
  * is allowed to launch tasks. By default, jobs are not allowed to launch
  * non-data-local tasks until they have waited a small number of seconds to
  * find a slot on a node that they have data on. If a job has waited this
- * long, it is allowed to launch rack-local tasks as well (on nodes that may
- * not have the task's input data, but share a rack with a node that does).
- * Finally, after a further wait, jobs are allowed to launch tasks anywhere
- * in the cluster.
+ * long, it is allowed to launch tasks at other locality levels as well:
+ * nodegroup-local (if the topology supports a nodegroup layer) and rack-local
+ * (on nodes that may not have the task's input data, but share a rack with a
+ * node that does). Finally, after a further wait, jobs are allowed to launch
+ * tasks anywhere in the cluster.
  * 
- * This enum defines three levels - NODE, RACK and ANY (for allowing tasks
- * to be launched on any node). A map task's level can be obtained from
+ * This enum defines four levels - NODE, NODEGROUP, RACK and ANY (for allowing
+ * tasks to be launched on any node). A map task's level can be obtained from
  * its job through {@link #fromTask(JobInProgress, Task, TaskTrackerStatus)}. In
  * addition, for any locality level, it is possible to get a "level cap" to pass
  * to {@link JobInProgress#obtainNewMapTask(TaskTrackerStatus, int, int, int)}
@@ -37,16 +38,25 @@ package org.apache.hadoop.mapred;
  * the {@link #toCacheLevelCap()} method.
  */
 public enum LocalityLevel {
-  NODE, RACK, ANY;
+  NODE, NODEGROUP, RACK, ANY;
   
   public static LocalityLevel fromTask(JobInProgress job, Task mapTask,
-      TaskTrackerStatus tracker) {
+      TaskTrackerStatus tracker, boolean isNodeGroupAware) {
     TaskID tipID = mapTask.getTaskID().getTaskID();
     TaskInProgress tip = job.getTaskInProgress(tipID);
-    switch (job.getLocalityLevel(tip, tracker)) {
-    case 0: return LocalityLevel.NODE;
-    case 1: return LocalityLevel.RACK;
-    default: return LocalityLevel.ANY;
+    if (isNodeGroupAware) {
+      switch (job.getLocalityLevel(tip, tracker)) {
+        case 0: return LocalityLevel.NODE;
+        case 1: return LocalityLevel.NODEGROUP;
+        case 2: return LocalityLevel.RACK;
+        default: return LocalityLevel.ANY;
+      }
+    } else {
+      switch (job.getLocalityLevel(tip, tracker)) {
+        case 0: return LocalityLevel.NODE;
+        case 1: return LocalityLevel.RACK;
+        default: return LocalityLevel.ANY;
+      }
     }
   }
   
@@ -55,11 +65,20 @@ public enum LocalityLevel {
    * {@link JobInProgress#obtainNewMapTask(TaskTrackerStatus, int, int, int)}
    * to ensure that only tasks of this locality level and lower are launched.
    */
-  public int toCacheLevelCap() {
-    switch(this) {
-    case NODE: return 1;
-    case RACK: return 2;
-    default: return Integer.MAX_VALUE;
+  public int toCacheLevelCap(boolean isNodeGroupAware) {
+    if (isNodeGroupAware) {
+      switch(this) {
+        case NODE: return 1;
+        case NODEGROUP: return 2;
+        case RACK: return 3;
+        default: return Integer.MAX_VALUE;
+      }
+    } else {
+      switch(this) {
+        case NODE: return 1;
+        case RACK: return 2;
+        default: return Integer.MAX_VALUE;
+      }
     }
   }
 }
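
The enum now carries two mappings, chosen by whether the topology has a nodegroup layer; the returned integer is the cache-level cap handed to obtainNewMapTask. A standalone sketch of the mapping, with values taken directly from the hunk above (the enum here mirrors, but is not, the Hadoop class):

public class LocalityCapSketch {
  // Mirrors the patched LocalityLevel; illustrative only.
  enum Level { NODE, NODEGROUP, RACK, ANY }

  static int cap(Level l, boolean nodeGroupAware) {
    if (nodeGroupAware) {
      switch (l) {
        case NODE:      return 1;
        case NODEGROUP: return 2;
        case RACK:      return 3;
        default:        return Integer.MAX_VALUE;
      }
    }
    switch (l) {
      case NODE: return 1;
      case RACK: return 2;
      // Without a nodegroup layer, NODEGROUP falls through to the ANY cap.
      default:   return Integer.MAX_VALUE;
    }
  }

  public static void main(String[] args) {
    for (Level l : Level.values()) {
      System.out.println(l + ": aware=" + cap(l, true)
          + " flat=" + cap(l, false));
    }
  }
}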

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java Fri Jun 21 06:37:27 2013
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.metrics.MetricsContext;
 
 /**
  * A schedulable pool of jobs.
@@ -44,6 +43,9 @@ public class Pool {
   private PoolSchedulable reduceSchedulable;
 
   public Pool(FairScheduler scheduler, String name) {
+    if (name == null) {
+      throw new IllegalArgumentException("Passed pool name was null.");
+    }
     this.name = name;
     mapSchedulable = new PoolSchedulable(scheduler, this, TaskType.MAP);
     reduceSchedulable = new PoolSchedulable(scheduler, this, TaskType.REDUCE);

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolSchedulable.java Fri Jun 21 06:37:27 2013
@@ -79,15 +79,20 @@ public class PoolSchedulable extends Sch
    */
   @Override
   public void updateDemand() {
+    // limit the demand to maxTasks
+    int maxTasks = poolMgr.getMaxSlots(pool.getName(), taskType);
     demand = 0;
     for (JobSchedulable sched: jobScheds) {
       sched.updateDemand();
       demand += sched.getDemand();
+      if (demand >= maxTasks) {
+        demand = maxTasks;
+        break;
+      }
     }
-    // if demand exceeds the cap for this pool, limit to the max
-    int maxTasks = poolMgr.getMaxSlots(pool.getName(), taskType);
-    if(demand > maxTasks) {
-      demand = maxTasks;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("The pool " + pool.getName() + " demand is " + demand
+          + "; maxTasks is " + maxTasks);
     }
   }
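
The reordering above moves the max-slots clamp inside the loop, so demand aggregation stops as soon as the pool's cap is reached instead of first summing (and updating) every job's demand and clamping afterwards. A tiny sketch of the capped accumulation over plain ints:

import java.util.Arrays;
import java.util.List;

public class CappedDemandSketch {
  static int cappedDemand(List<Integer> perJobDemand, int maxTasks) {
    int demand = 0;
    for (int d : perJobDemand) {
      demand += d;
      if (demand >= maxTasks) {  // stop early once the cap is hit
        return maxTasks;
      }
    }
    return demand;
  }

  public static void main(String[] args) {
    // With a cap of 10, the jobs after the second are never visited.
    System.out.println(cappedDemand(Arrays.asList(6, 7, 9, 100), 10));  // 10
    System.out.println(cappedDemand(Arrays.asList(2, 3), 10));          // 5
  }
}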
   

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java Fri Jun 21 06:37:27 2013
@@ -27,7 +27,6 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.LinkedHashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -36,10 +35,7 @@ import java.util.TreeMap;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.mapred.FairScheduler.JobInfo;
-import org.apache.hadoop.mapred.MRConstants;
-import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapred.JobInProgress.KillInterruptedException;
 import org.apache.hadoop.mapred.UtilsForTests.FakeClock;
 import org.apache.hadoop.mapreduce.TaskType;
@@ -51,7 +47,7 @@ import org.apache.hadoop.metrics.Metrics
 import org.apache.hadoop.metrics.spi.NoEmitMetricsContext;
 import org.apache.hadoop.metrics.spi.OutputRecord;
 import org.apache.hadoop.net.Node;
-import org.mortbay.log.Log;
+import org.mockito.Mockito;
 
 public class TestFairScheduler extends TestCase {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
@@ -513,6 +509,12 @@ public class TestFairScheduler extends T
       trackerForTip.get(attemptIdStr).getTaskReports().remove(status);
       return true;
     }
+
+    @Override
+    public boolean isInSafeMode() {
+      // TODO Auto-generated method stub
+      return false;
+    }
   }
   
   protected JobConf conf;
@@ -557,6 +559,10 @@ public class TestFairScheduler extends T
     clock = new FakeClock();
     try {
       jobTracker = new JobTracker(conf, clock);
+      jobTracker.setSafeModeInternal(JobTracker.SafeModeAction.SAFEMODE_ENTER);
+      jobTracker.initializeFilesystem();
+      jobTracker.setSafeModeInternal(JobTracker.SafeModeAction.SAFEMODE_LEAVE);
+      jobTracker.initialize();
     } catch (Exception e) {
       throw new RuntimeException("Could not start JT", e);
     }
@@ -2845,6 +2851,35 @@ public class TestFairScheduler extends T
     assertEquals(0,    poolA.getReduceSchedulable().getDemand());
   }
   
+  public void testMaxTasksToAssign() {
+    TaskTrackerStatus mockTTS = Mockito.mock(TaskTrackerStatus.class);
+    TaskType type = TaskType.MAP;
+    Mockito.when(mockTTS.getAvailableMapSlots()).thenReturn(5);
+    
+    FairScheduler fs = new FairScheduler(null, false);
+
+    // Case 1: assignMultiple is false
+    fs.assignMultiple = false;
+    assertEquals("Number of tasks to assign", 1,
+        fs.maxTasksToAssign(type, mockTTS));
+
+    // Case 2: assignMultiple is true, cap = -1
+    fs.assignMultiple = true;
+    fs.mapAssignCap = -1;
+    assertEquals("Number of tasks to assign", 5,
+        fs.maxTasksToAssign(type, mockTTS));
+
+    // Case 3: cap = 10
+    fs.mapAssignCap = 10;
+    assertEquals("Number of tasks to assign", 5,
+        fs.maxTasksToAssign(type, mockTTS));
+
+    // Case 4: cap = 2
+    fs.mapAssignCap = 2;
+    assertEquals("Number of tasks to assign", 2,
+        fs.maxTasksToAssign(type, mockTTS));
+  }
+
   private void advanceTime(long time) {
     clock.advance(time);
     scheduler.update();

Added: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerEventLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerEventLog.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerEventLog.java (added)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerEventLog.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.TestFairScheduler.FakeTaskTrackerManager;
+
+public class TestFairSchedulerEventLog extends TestCase {
+  
+  private File logFile;
+  
+  /**
+   * Make sure the scheduler creates the event log.
+   */
+  public void testCreateEventLog() throws IOException {
+    Configuration conf = new Configuration();
+    conf.set("mapred.fairscheduler.eventlog.enabled", "true");
+    
+    FakeTaskTrackerManager taskTrackerManager = new FakeTaskTrackerManager(1, 1);
+    
+    FairScheduler scheduler = new FairScheduler();
+    scheduler.setConf(conf);
+    scheduler.setTaskTrackerManager(taskTrackerManager);
+    scheduler.start();
+    
+    FairSchedulerEventLog eventLog = scheduler.getEventLog();
+    
+    scheduler.terminate();
+    
+    logFile = new File(eventLog.getLogFile());
+    Assert.assertTrue(logFile.exists());
+  }
+  
+  @Override
+  public void tearDown() {
+    logFile.delete();
+    logFile.getParentFile().delete(); // fairscheduler/
+  }
+}

Modified: hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerPoolNames.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerPoolNames.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerPoolNames.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairSchedulerPoolNames.java Fri Jun 21 06:37:27 2013
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.Pool;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -175,4 +176,18 @@ public class TestFairSchedulerPoolNames 
         "Add pool name to the fair scheduler allocation file"));
   }
 
-}
\ No newline at end of file
+  /**
+   * Tests that no Pool object can be created with a null string.
+   */
+  @Test
+  public void testPoolNameNotNull() {
+    try {
+      Pool pool = new Pool(null, null);
+      fail("Pool object got created with a null name somehow.");
+    } catch (IllegalArgumentException e) {
+      // Pass
+    } catch (Exception e) {
+      fail("Pool object got created with a null name and failed only later.");
+    }
+  }
+}

Modified: hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by streaming.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/src/fuse_options.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/src/fuse_options.c?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/src/fuse_options.c (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/fuse-dfs/src/fuse_options.c Fri Jun 21 06:37:27 2013
@@ -110,7 +110,7 @@ int dfs_options(void *data, const char *
     options.usetrash = 1;
     break;
   case KEY_NOTRASH:
-    options.usetrash = 1;
+    options.usetrash = 0;
     break;
   case KEY_RO:
     options.read_only = 1;

Modified: hadoop/common/branches/branch-1-win/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java Fri Jun 21 06:37:27 2013
@@ -155,10 +155,11 @@ class GenerateDistCacheData extends Grid
       FSDataOutputStream dos =
           FileSystem.create(fs, path, new FsPermission((short)0755));
 
-      for (long bytes = key.get(); bytes > 0; bytes -= val.getLength()) {
+      int size = 0;
+      for (long bytes = key.get(); bytes > 0; bytes -= size) {
         r.nextBytes(val.getBytes());
-        val.setSize((int)Math.min(val.getLength(), bytes));
-        dos.write(val.getBytes(), 0, val.getLength());// Write to distCache file
+        size = (int)Math.min(val.getLength(), bytes);
+        dos.write(val.getBytes(), 0, size);// Write to distCache file
       }
       dos.close();
     }
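
The fix above stops mutating the reusable value buffer: the old loop shrank val with setSize(...) to mark a trailing short write, which also changed what val.getLength() reported afterwards; the new loop keeps the short-write size in a local and leaves the buffer alone. A standalone sketch of the corrected pattern over a plain byte array (names illustrative):

import java.io.ByteArrayOutputStream;
import java.util.Random;

public class DistCacheWriteSketch {
  public static void main(String[] args) {
    long fileSize = 10000;        // bytes to produce (illustrative)
    byte[] buf = new byte[4096];  // reusable buffer, like `val`
    Random r = new Random(0);
    ByteArrayOutputStream dos = new ByteArrayOutputStream();

    int size = 0;
    for (long bytes = fileSize; bytes > 0; bytes -= size) {
      r.nextBytes(buf);
      // Track the (possibly short) final write in a local; never shrink buf.
      size = (int) Math.min(buf.length, bytes);
      dos.write(buf, 0, size);
    }
    System.out.println(dos.size());  // exactly 10000
  }
}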

Modified: hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/configuration.xsl
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/configuration.xsl?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/configuration.xsl (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/configuration.xsl Fri Jun 21 06:37:27 2013
@@ -1,4 +1,17 @@
 <?xml version="1.0"?>
+<!--
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
 <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
 <xsl:output method="html"/>
 <xsl:template match="configuration">

Modified: hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/log4j.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/log4j.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/conf/log4j.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 # Define some default values that can be overridden by system properties
 hdfsproxy.root.logger=INFO,console
 hdfsproxy.log.dir=.

Modified: hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by hadoop.
 #It drives ivy and the generation of a maven POM
 #These are the versions of our dependencies (in alphabetical order)

Modified: hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/KerberosAuthorizationFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/KerberosAuthorizationFilter.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/KerberosAuthorizationFilter.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/KerberosAuthorizationFilter.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,18 @@
+/**
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
 package org.apache.hadoop.hdfsproxy;
 
 import org.apache.hadoop.conf.Configuration;

Modified: hadoop/common/branches/branch-1-win/src/contrib/hod/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/hod/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/hod/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/hod/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by HOD.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/index/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/index/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/index/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/index/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by index.
 
 #These are the versions of our dependencies (in alphabetical order)

Modified: hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy.xml?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy.xml (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy.xml Fri Jun 21 06:37:27 2013
@@ -90,10 +90,6 @@
       name="jets3t"
       rev="${jets3t.version}"
       conf="common->master"/>  -->
-<!--    <dependency org="commons-net"
-      name="commons-net"
-      rev="${commons-net.version}"
-      conf="common->master"/>  -->
     <dependency org="commons-codec"
       name="commons-codec"
       rev="${commons-codec.version}"

Modified: hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/streaming/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by streaming.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java Fri Jun 21 06:37:27 2013
@@ -107,10 +107,10 @@ public class TestMultipleCachefiles exte
         file.writeBytes(mapString2 + "\n");
         file.close();
         file = fileSys.create(new Path(CACHE_FILE));
-        file.writeBytes(cacheString);
+        file.writeBytes(cacheString + "\n");
         file.close();
         file = fileSys.create(new Path(CACHE_FILE_2));
-        file.writeBytes(cacheString2);
+        file.writeBytes(cacheString2 + "\n");
         file.close();
           
         job = new StreamJob(argv, mayExit);     

Modified: hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java Fri Jun 21 06:37:27 2013
@@ -46,7 +46,7 @@ public class TestUlimit extends TestCase
   MiniDFSCluster dfs = null;
   MiniMRCluster mr = null;
   FileSystem fs = null;
-  private static String SET_MEMORY_LIMIT = "786432"; // 768MB
+  private static String SET_MEMORY_LIMIT = "1786432"; // 1786432 KB ~= 1745 MB
 
   String[] genArgs(String memLimit) {
     return new String[] {
@@ -57,6 +57,8 @@ public class TestUlimit extends TestCase
       "-numReduceTasks", "0",
       "-jobconf", "mapred.map.tasks=1",
       "-jobconf", JobConf.MAPRED_MAP_TASK_ULIMIT + "=" + memLimit,
+      "-jobconf", "mapred.child.java.opts=-Xmx512m",
+      "-jobconf", "mapred.child.env=MALLOC_ARENA_MAX=1",
       "-jobconf", "mapred.job.tracker=" + "localhost:" +
                                            mr.getJobTrackerPort(),
       "-jobconf", "fs.default.name=" + "hdfs://localhost:" 

Modified: hadoop/common/branches/branch-1-win/src/contrib/thriftfs/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/thriftfs/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/thriftfs/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/thriftfs/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by thriftfs.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,18 @@
+/**
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
 package org.apache.hadoop.thriftfs;
 
 import com.facebook.thrift.TException;

Modified: hadoop/common/branches/branch-1-win/src/contrib/vaidya/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/contrib/vaidya/ivy/libraries.properties?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/contrib/vaidya/ivy/libraries.properties (original)
+++ hadoop/common/branches/branch-1-win/src/contrib/vaidya/ivy/libraries.properties Fri Jun 21 06:37:27 2013
@@ -1,3 +1,16 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+
 #This properties file lists the versions of the various artifacts used by vaidya.
 #It drives ivy and the generation of a maven POM
 

Modified: hadoop/common/branches/branch-1-win/src/core/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/core-default.xml?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/core-default.xml (original)
+++ hadoop/common/branches/branch-1-win/src/core/core-default.xml Fri Jun 21 06:37:27 2013
@@ -45,6 +45,15 @@
 </property>
 
 <property>
+  <name>hadoop.security.instrumentation.requires.admin</name>
+  <value>false</value>
+  <description>
+    Indicates whether administrator ACLs are required to access
+    instrumentation servlets (JMX, METRICS, CONF, STACKS).
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.authentication</name>
   <value>simple</value>
   <description>Possible values are simple (no authentication), and kerberos
@@ -63,6 +72,17 @@
   </description>
 </property>
 
+<property>
+  <name>hadoop.security.use-weak-http-crypto</name>
+  <value>false</value>
+  <description>If enabled, use KSSL to authenticate HTTP connections to the
+  NameNode. Due to a bug in JDK6, using KSSL requires one to configure
+  Kerberos tickets to use encryption types that are known to be
+  cryptographically weak. If disabled, SPNEGO will be used for HTTP
+  authentication, which supports stronger encryption types.
+  </description>
+</property>
+
 <!--
 <property>
   <name>hadoop.security.service.user.name.key</name>
@@ -560,4 +580,41 @@
   </description>
 </property>
 
+<property>
+  <name>hadoop.relaxed.worker.version.check</name>
+  <value>false</value>
+  <description>
+    By default, datanodes refuse to connect to namenodes if their build
+    revision (svn revision) does not match, and tasktrackers refuse to
+    connect to jobtrackers if their build version (version, revision,
+    user, and source checksum) does not match. This option changes the
+    behavior of hadoop workers to check only for a version match (e.g.
+    "1.0.2") and ignore the other build fields (revision, user, and
+    source checksum).
+  </description>
+</property>
+
+<property>
+  <name>hadoop.skip.worker.version.check</name>
+  <value>false</value>
+  <description>
+    By default, datanodes refuse to connect to namenodes if their build
+    revision (svn revision) does not match, and tasktrackers refuse to
+    connect to jobtrackers if their build version (version, revision,
+    user, and source checksum) does not match. This option changes the
+    behavior of hadoop workers to skip the version check entirely.
+    This option supersedes the 'hadoop.relaxed.worker.version.check'
+    option.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.jetty.logs.serve.aliases</name>
+  <value>true</value>
+  <description>
+    Enables or disables serving aliased files (e.g. symlinks) from Jetty.
+  </description>
+</property>
+
+
 </configuration>
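
The new keys above are ordinary boolean settings, so they can be overridden in
core-site.xml or programmatically. A minimal programmatic sketch (key names
are taken from the descriptions above; the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class WorkerVersionCheckExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Skip the worker build-version check entirely; per the description
        // above, this supersedes hadoop.relaxed.worker.version.check.
        conf.setBoolean("hadoop.skip.worker.version.check", true);
        // Require admin ACLs for the instrumentation servlets
        // (JMX, METRICS, CONF, STACKS).
        conf.setBoolean("hadoop.security.instrumentation.requires.admin", true);
        System.out.println(
            conf.getBoolean("hadoop.skip.worker.version.check", false));
      }
    }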

Added: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/conf/ConfServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/conf/ConfServlet.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/conf/ConfServlet.java (added)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/conf/ConfServlet.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.conf;
+
+import java.io.IOException;
+import java.io.Writer;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.http.HttpServer;
+
+/**
+ * A servlet to print out the running configuration data.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public class ConfServlet extends HttpServlet {
+  private static final long serialVersionUID = 1L;
+
+  private static final String FORMAT_JSON = "json";
+  private static final String FORMAT_XML = "xml";
+  private static final String FORMAT_PARAM = "format";
+
+  /**
+   * Return the Configuration of the daemon hosting this servlet.
+   * This is populated when the HttpServer starts.
+   */
+  private Configuration getConfFromContext() {
+    Configuration conf = (Configuration)getServletContext().getAttribute(
+        HttpServer.CONF_CONTEXT_ATTRIBUTE);
+    assert conf != null;
+    return conf;
+  }
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws ServletException, IOException {
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+        request, response)) {
+      return;
+    }
+    String format = request.getParameter(FORMAT_PARAM);
+    if (null == format) {
+      format = FORMAT_XML;
+    }
+
+    if (FORMAT_XML.equals(format)) {
+      response.setContentType("text/xml; charset=utf-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=utf-8");
+    }
+
+    Writer out = response.getWriter();
+    try {
+      writeResponse(getConfFromContext(), out, format);
+    } catch (BadFormatException bfe) {
+      response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
+    }
+    out.close();
+  }
+
+  /**
+   * Guts of the servlet - extracted for easy testing.
+   */
+  static void writeResponse(Configuration conf, Writer out, String format)
+    throws IOException, BadFormatException {
+    if (FORMAT_JSON.equals(format)) {
+      Configuration.dumpConfiguration(conf, out);
+    } else if (FORMAT_XML.equals(format)) {
+      conf.writeXml(out);
+    } else {
+      throw new BadFormatException("Bad format: " + format);
+    }
+  }
+
+  public static class BadFormatException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public BadFormatException(String msg) {
+      super(msg);
+    }
+  }
+
+}
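
writeResponse() is deliberately extracted so the servlet's guts can be
exercised without standing up an HTTP server. A minimal same-package smoke
test along these lines (the class name and config key are illustrative; the
test must live in org.apache.hadoop.conf because writeResponse is
package-private):

    package org.apache.hadoop.conf;

    import java.io.StringWriter;

    public class ConfServletSmokeTest {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("example.key", "example.value");  // illustrative entry

        StringWriter xml = new StringWriter();
        ConfServlet.writeResponse(conf, xml, "xml");
        System.out.println(xml);                   // full <configuration> dump

        StringWriter json = new StringWriter();
        ConfServlet.writeResponse(conf, json, "json");
        System.out.println(json);                  // same data as JSON

        // Any other format string raises BadFormatException, which doGet()
        // maps to HTTP 400 (SC_BAD_REQUEST).
      }
    }

Over HTTP, the servlet presumably serves the CONF endpoint named in the
hadoop.security.instrumentation.requires.admin description above (e.g.
GET /conf?format=json), with access gated by the same
HttpServer.isInstrumentationAccessAllowed() check that doGet() performs.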