You are viewing a plain text version of this content. The canonical link for it is here.
Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2013/01/23 19:51:32 UTC
svn commit: r1437623 [2/2] - in
/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project: ./ conf/
dev-support/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/
hadoop-mapreduce-client/hadoop-mapreduce-client...
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java Wed Jan 23 18:51:24 2013
@@ -34,12 +34,12 @@ abstract class MergeThread<T,K,V> extend
private AtomicInteger numPending = new AtomicInteger(0);
private LinkedList<List<T>> pendingToBeMerged;
- protected final MergeManager<K,V> manager;
+ protected final MergeManagerImpl<K,V> manager;
private final ExceptionReporter reporter;
private boolean closed = false;
private final int mergeFactor;
- public MergeThread(MergeManager<K,V> manager, int mergeFactor,
+ public MergeThread(MergeManagerImpl<K,V> manager, int mergeFactor,
ExceptionReporter reporter) {
this.pendingToBeMerged = new LinkedList<List<T>>();
this.manager = manager;
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java Wed Jan 23 18:51:24 2013
@@ -21,17 +21,10 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapred.RawKeyValueIterator;
-import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Task;
-import org.apache.hadoop.mapred.Task.CombineOutputCollector;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapred.ShuffleConsumerPlugin;
@@ -77,17 +70,21 @@ public class Shuffle<K, V> implements Sh
this.taskStatus = context.getStatus();
this.reduceTask = context.getReduceTask();
- scheduler =
- new ShuffleScheduler<K,V>(jobConf, taskStatus, this, copyPhase,
- context.getShuffledMapsCounter(),
- context.getReduceShuffleBytes(), context.getFailedShuffleCounter());
- merger = new MergeManager<K, V>(reduceId, jobConf, context.getLocalFS(),
- context.getLocalDirAllocator(), reporter, context.getCodec(),
- context.getCombinerClass(), context.getCombineCollector(),
- context.getSpilledRecordsCounter(),
- context.getReduceCombineInputCounter(),
- context.getMergedMapOutputsCounter(),
- this, context.getMergePhase(), context.getMapOutputFile());
+ scheduler = new ShuffleScheduler<K,V>(jobConf, taskStatus, this,
+ copyPhase, context.getShuffledMapsCounter(),
+ context.getReduceShuffleBytes(), context.getFailedShuffleCounter());
+ merger = createMergeManager(context);
+ }
+
+ protected MergeManager<K, V> createMergeManager(
+ ShuffleConsumerPlugin.Context context) {
+ return new MergeManagerImpl<K, V>(reduceId, jobConf, context.getLocalFS(),
+ context.getLocalDirAllocator(), reporter, context.getCodec(),
+ context.getCombinerClass(), context.getCombineCollector(),
+ context.getSpilledRecordsCounter(),
+ context.getReduceCombineInputCounter(),
+ context.getMergedMapOutputsCounter(), this, context.getMergePhase(),
+ context.getMapOutputFile());
}
@Override
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml Wed Jan 23 18:51:24 2013
@@ -858,6 +858,17 @@
</property>
<property>
+ <name>yarn.app.mapreduce.am.admin.user.env</name>
+ <value></value>
+ <description> Environment variables for the MR App Master
+ processes for admin purposes. These values are set first and can be
overridden by the user env (yarn.app.mapreduce.am.env). Example:
+ 1) A=foo This will set the env variable A to foo
2) B=$B:c This will inherit the app master's B env variable.
+ </description>
+</property>
+
+<property>
<name>yarn.app.mapreduce.am.command-opts</name>
<value>-Xmx1024m</value>
<description>Java opts for the MR App Master processes.
Propchange: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml:r1433249-1437619
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java Wed Jan 23 18:51:24 2013
@@ -53,7 +53,7 @@ public class TestFetcher {
private HttpURLConnection connection;
public FakeFetcher(JobConf job, TaskAttemptID reduceId,
- ShuffleScheduler<K,V> scheduler, MergeManager<K,V> merger, Reporter reporter,
+ ShuffleScheduler<K,V> scheduler, MergeManagerImpl<K,V> merger, Reporter reporter,
ShuffleClientMetrics metrics, ExceptionReporter exceptionReporter,
SecretKey jobTokenSecret, HttpURLConnection connection) {
super(job, reduceId, scheduler, merger, reporter, metrics, exceptionReporter,
@@ -77,7 +77,7 @@ public class TestFetcher {
JobConf job = new JobConf();
TaskAttemptID id = TaskAttemptID.forName("attempt_0_1_r_1_1");
ShuffleScheduler<Text, Text> ss = mock(ShuffleScheduler.class);
- MergeManager<Text, Text> mm = mock(MergeManager.class);
+ MergeManagerImpl<Text, Text> mm = mock(MergeManagerImpl.class);
Reporter r = mock(Reporter.class);
ShuffleClientMetrics metrics = mock(ShuffleClientMetrics.class);
ExceptionReporter except = mock(ExceptionReporter.class);
@@ -132,7 +132,7 @@ public class TestFetcher {
JobConf job = new JobConf();
TaskAttemptID id = TaskAttemptID.forName("attempt_0_1_r_1_1");
ShuffleScheduler<Text, Text> ss = mock(ShuffleScheduler.class);
- MergeManager<Text, Text> mm = mock(MergeManager.class);
+ MergeManagerImpl<Text, Text> mm = mock(MergeManagerImpl.class);
Reporter r = mock(Reporter.class);
ShuffleClientMetrics metrics = mock(ShuffleClientMetrics.class);
ExceptionReporter except = mock(ExceptionReporter.class);
@@ -167,10 +167,9 @@ public class TestFetcher {
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
- //Defaults to WAIT, which is what we want to test
- MapOutput<Text,Text> mapOut = new MapOutput<Text, Text>(map1ID);
+ //Defaults to null, which is what we want to test
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
- .thenReturn(mapOut);
+ .thenReturn(null);
underTest.copyFromHost(host);
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java Wed Jan 23 18:51:24 2013
@@ -32,13 +32,13 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.task.reduce.MapOutput.Type;
import org.junit.Assert;
import org.junit.Test;
public class TestMergeManager {
@Test(timeout=10000)
+ @SuppressWarnings("unchecked")
public void testMemoryMerge() throws Exception {
final int TOTAL_MEM_BYTES = 10000;
final int OUTPUT_SIZE = 7950;
@@ -55,45 +55,47 @@ public class TestMergeManager {
// reserve enough map output to cause a merge when it is committed
MapOutput<Text, Text> out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
- Assert.assertEquals("Should be a memory merge",
- Type.MEMORY, out1.getType());
- fillOutput(out1);
+ Assert.assertTrue("Should be a memory merge",
+ (out1 instanceof InMemoryMapOutput));
+ InMemoryMapOutput<Text, Text> mout1 = (InMemoryMapOutput<Text, Text>)out1;
+ fillOutput(mout1);
MapOutput<Text, Text> out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
- Assert.assertEquals("Should be a memory merge",
- Type.MEMORY, out2.getType());
- fillOutput(out2);
+ Assert.assertTrue("Should be a memory merge",
+ (out2 instanceof InMemoryMapOutput));
+ InMemoryMapOutput<Text, Text> mout2 = (InMemoryMapOutput<Text, Text>)out2;
+ fillOutput(mout2);
// next reservation should be a WAIT
MapOutput<Text, Text> out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
- Assert.assertEquals("Should be told to wait",
- Type.WAIT, out3.getType());
+ Assert.assertEquals("Should be told to wait", null, out3);
// trigger the first merge and wait for merge thread to start merging
// and free enough output to reserve more
- out1.commit();
- out2.commit();
+ mout1.commit();
+ mout2.commit();
mergeStart.await();
Assert.assertEquals(1, mgr.getNumMerges());
// reserve enough map output to cause another merge when committed
out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
- Assert.assertEquals("Should be a memory merge",
- Type.MEMORY, out1.getType());
- fillOutput(out1);
+ Assert.assertTrue("Should be a memory merge",
+ (out1 instanceof InMemoryMapOutput));
+ mout1 = (InMemoryMapOutput<Text, Text>)out1;
+ fillOutput(mout1);
out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
- Assert.assertEquals("Should be a memory merge",
- Type.MEMORY, out2.getType());
- fillOutput(out2);
+ Assert.assertTrue("Should be a memory merge",
+ (out2 instanceof InMemoryMapOutput));
+ mout2 = (InMemoryMapOutput<Text, Text>)out2;
+ fillOutput(mout2);
- // next reservation should be a WAIT
+ // next reservation should be null
out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
- Assert.assertEquals("Should be told to wait",
- Type.WAIT, out3.getType());
+ Assert.assertEquals("Should be told to wait", null, out3);
// commit output *before* merge thread completes
- out1.commit();
- out2.commit();
+ mout1.commit();
+ mout2.commit();
// allow the first merge to complete
mergeComplete.await();
@@ -110,7 +112,7 @@ public class TestMergeManager {
0, reporter.getNumExceptions());
}
- private void fillOutput(MapOutput<Text, Text> output) throws IOException {
+ private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
BoundedByteArrayOutputStream stream = output.getArrayStream();
int count = stream.getLimit();
for (int i=0; i < count; ++i) {
@@ -118,7 +120,7 @@ public class TestMergeManager {
}
}
- private static class StubbedMergeManager extends MergeManager<Text, Text> {
+ private static class StubbedMergeManager extends MergeManagerImpl<Text, Text> {
private TestMergeThread mergeThread;
public StubbedMergeManager(JobConf conf, ExceptionReporter reporter,
@@ -129,7 +131,7 @@ public class TestMergeManager {
}
@Override
- protected MergeThread<MapOutput<Text, Text>, Text, Text> createInMemoryMerger() {
+ protected MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> createInMemoryMerger() {
mergeThread = new TestMergeThread(this, getExceptionReporter());
return mergeThread;
}
@@ -140,12 +142,12 @@ public class TestMergeManager {
}
private static class TestMergeThread
- extends MergeThread<MapOutput<Text,Text>, Text, Text> {
+ extends MergeThread<InMemoryMapOutput<Text,Text>, Text, Text> {
private AtomicInteger numMerges;
private CyclicBarrier mergeStart;
private CyclicBarrier mergeComplete;
- public TestMergeThread(MergeManager<Text, Text> mergeManager,
+ public TestMergeThread(MergeManagerImpl<Text, Text> mergeManager,
ExceptionReporter reporter) {
super(mergeManager, Integer.MAX_VALUE, reporter);
numMerges = new AtomicInteger(0);
@@ -162,11 +164,11 @@ public class TestMergeManager {
}
@Override
- public void merge(List<MapOutput<Text, Text>> inputs)
+ public void merge(List<InMemoryMapOutput<Text, Text>> inputs)
throws IOException {
synchronized (this) {
numMerges.incrementAndGet();
- for (MapOutput<Text, Text> input : inputs) {
+ for (InMemoryMapOutput<Text, Text> input : inputs) {
manager.unreserve(input.getSize());
}
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java Wed Jan 23 18:51:24 2013
@@ -36,6 +36,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.TaskID;
@@ -183,13 +184,13 @@ public class CompletedJob implements org
}
@Override
- public synchronized TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents(
+ public synchronized TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
if (mapCompletionEvents == null) {
constructTaskAttemptCompletionEvents();
}
- return getAttemptCompletionEvents(mapCompletionEvents,
- startIndex, maxEvents);
+ return TypeConverter.fromYarn(getAttemptCompletionEvents(
+ mapCompletionEvents, startIndex, maxEvents));
}
private static TaskAttemptCompletionEvent[] getAttemptCompletionEvents(
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java Wed Jan 23 18:51:24 2013
@@ -25,6 +25,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
@@ -154,7 +155,7 @@ public class PartialJob implements org.a
}
@Override
- public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents(
+ public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return null;
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java Wed Jan 23 18:51:24 2013
@@ -23,6 +23,7 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -143,7 +144,7 @@ public class MockHistoryJobs extends Moc
}
@Override
- public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents(
+ public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return job.getMapAttemptCompletionEvents(startIndex, maxEvents);
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java Wed Jan 23 18:51:24 2013
@@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -69,14 +70,16 @@ import org.apache.hadoop.security.token.
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ClientToken;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier;
import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.ProtoUtils;
public class ClientServiceDelegate {
private static final Log LOG = LogFactory.getLog(ClientServiceDelegate.class);
@@ -176,13 +179,10 @@ public class ClientServiceDelegate {
serviceAddr = NetUtils.createSocketAddrForHost(
application.getHost(), application.getRpcPort());
if (UserGroupInformation.isSecurityEnabled()) {
- String clientTokenEncoded = application.getClientToken();
- Token<ApplicationTokenIdentifier> clientToken =
- new Token<ApplicationTokenIdentifier>();
- clientToken.decodeFromUrlString(clientTokenEncoded);
- // RPC layer client expects ip:port as service for tokens
- SecurityUtil.setTokenService(clientToken, serviceAddr);
- newUgi.addToken(clientToken);
+ ClientToken clientToken = application.getClientToken();
+ Token<ClientTokenIdentifier> token =
+ ProtoUtils.convertFromProtoFormat(clientToken, serviceAddr);
+ newUgi.addToken(token);
}
LOG.debug("Connecting to " + serviceAddr);
final InetSocketAddress finalServiceAddr = serviceAddr;
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java Wed Jan 23 18:51:24 2013
@@ -62,8 +62,8 @@ import org.apache.hadoop.mapreduce.v2.ap
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -86,10 +86,9 @@ public class NotRunningJob implements MR
.newRecordInstance(ApplicationAttemptId.class);
// Setting AppState to NEW and finalStatus to UNDEFINED as they are never
- // used
- // for a non running job
+ // used for a non running job
return BuilderUtils.newApplicationReport(unknownAppId, unknownAttemptId,
- "N/A", "N/A", "N/A", "N/A", 0, "", YarnApplicationState.NEW, "N/A",
+ "N/A", "N/A", "N/A", "N/A", 0, null, YarnApplicationState.NEW, "N/A",
"N/A", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A");
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java Wed Jan 23 18:51:24 2013
@@ -101,7 +101,7 @@ public class YARNRunner implements Clien
private Configuration conf;
private final FileContext defaultFileContext;
- /* usually is false unless the jobclient getdelegation token is
+ /* usually is false unless the jobclient get delegation token is
* called. This is a hack wherein we do return a token from RM
* on getDelegationtoken but due to the restricted api on jobclient
* we just add a job history DT token when submitting a job.
@@ -165,12 +165,12 @@ public class YARNRunner implements Clien
@Override
public TaskTrackerInfo[] getActiveTrackers() throws IOException,
InterruptedException {
- return resMgrDelegate.getActiveTrackers();
+ return resMgrDelegate.getActiveTrackers();
}
@Override
public JobStatus[] getAllJobs() throws IOException, InterruptedException {
- return resMgrDelegate.getAllJobs();
+ return resMgrDelegate.getAllJobs();
}
@Override
@@ -394,14 +394,31 @@ public class YARNRunner implements Clien
MRJobConfig.MR_AM_LOG_LEVEL, MRJobConfig.DEFAULT_MR_AM_LOG_LEVEL);
MRApps.addLog4jSystemProperties(logLevel, logSize, vargs);
+ // Check for Java Lib Path usage in MAP and REDUCE configs
+ warnForJavaLibPath(conf.get(MRJobConfig.MAP_JAVA_OPTS,""), "map",
+ MRJobConfig.MAP_JAVA_OPTS, MRJobConfig.MAP_ENV);
+ warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,""), "map",
+ MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
+ warnForJavaLibPath(conf.get(MRJobConfig.REDUCE_JAVA_OPTS,""), "reduce",
+ MRJobConfig.REDUCE_JAVA_OPTS, MRJobConfig.REDUCE_ENV);
+ warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,""), "reduce",
+ MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
+
// Add AM admin command opts before user command opts
// so that it can be overridden by user
- vargs.add(conf.get(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
- MRJobConfig.DEFAULT_MR_AM_ADMIN_COMMAND_OPTS));
+ String mrAppMasterAdminOptions = conf.get(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
+ MRJobConfig.DEFAULT_MR_AM_ADMIN_COMMAND_OPTS);
+ warnForJavaLibPath(mrAppMasterAdminOptions, "app master",
+ MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, MRJobConfig.MR_AM_ADMIN_USER_ENV);
+ vargs.add(mrAppMasterAdminOptions);
+
+ // Add AM user command opts
+ String mrAppMasterUserOptions = conf.get(MRJobConfig.MR_AM_COMMAND_OPTS,
+ MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS);
+ warnForJavaLibPath(mrAppMasterUserOptions, "app master",
+ MRJobConfig.MR_AM_COMMAND_OPTS, MRJobConfig.MR_AM_ENV);
+ vargs.add(mrAppMasterUserOptions);
- vargs.add(conf.get(MRJobConfig.MR_AM_COMMAND_OPTS,
- MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS));
-
vargs.add(MRJobConfig.APPLICATION_MASTER_CLASS);
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
Path.SEPARATOR + ApplicationConstants.STDOUT);
@@ -425,6 +442,9 @@ public class YARNRunner implements Clien
Map<String, String> environment = new HashMap<String, String>();
MRApps.setClasspath(environment, conf);
+ // Setup the environment variables for Admin first
+ MRApps.setEnvFromInputString(environment,
+ conf.get(MRJobConfig.MR_AM_ADMIN_USER_ENV));
// Setup the environment variables (LD_LIBRARY_PATH, etc)
MRApps.setEnvFromInputString(environment,
conf.get(MRJobConfig.MR_AM_ENV));
@@ -582,4 +602,15 @@ public class YARNRunner implements Clien
throws IOException {
return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
+
+ private static void warnForJavaLibPath(String opts, String component,
+ String javaConf, String envConf) {
+ if (opts != null && opts.contains("-Djava.library.path")) {
+ LOG.warn("Usage of -Djava.library.path in " + javaConf + " can cause " +
+ "programs to no longer function if hadoop native libraries " +
+ "are used. These values should be set as part of the " +
+ "LD_LIBRARY_PATH in the " + component + " JVM env using " +
+ envConf + " config settings.");
+ }
+ }
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java Wed Jan 23 18:51:24 2013
@@ -90,8 +90,8 @@ public abstract class NotificationTestCa
}
public static class NotificationServlet extends HttpServlet {
- public static int counter = 0;
- public static int failureCounter = 0;
+ public static volatile int counter = 0;
+ public static volatile int failureCounter = 0;
private static final long serialVersionUID = 1L;
protected void doGet(HttpServletRequest req, HttpServletResponse res)
@@ -155,7 +155,11 @@ public abstract class NotificationTestCa
System.out.println(launchWordCount(this.createJobConf(),
"a b c d e f g h", 1, 1));
- Thread.sleep(2000);
+ boolean keepTrying = true;
+ for (int tries = 0; tries < 30 && keepTrying; tries++) {
+ Thread.sleep(50);
+ keepTrying = !(NotificationServlet.counter == 2);
+ }
assertEquals(2, NotificationServlet.counter);
assertEquals(0, NotificationServlet.failureCounter);
@@ -173,14 +177,22 @@ public abstract class NotificationTestCa
// run a job with KILLED status
System.out.println(UtilsForTests.runJobKill(this.createJobConf(), inDir,
outDir).getID());
- Thread.sleep(2000);
+ keepTrying = true;
+ for (int tries = 0; tries < 30 && keepTrying; tries++) {
+ Thread.sleep(50);
+ keepTrying = !(NotificationServlet.counter == 4);
+ }
assertEquals(4, NotificationServlet.counter);
assertEquals(0, NotificationServlet.failureCounter);
// run a job with FAILED status
System.out.println(UtilsForTests.runJobFail(this.createJobConf(), inDir,
outDir).getID());
- Thread.sleep(2000);
+ keepTrying = true;
+ for (int tries = 0; tries < 30 && keepTrying; tries++) {
+ Thread.sleep(50);
+ keepTrying = !(NotificationServlet.counter == 6);
+ }
assertEquals(6, NotificationServlet.counter);
assertEquals(0, NotificationServlet.failureCounter);
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java Wed Jan 23 18:51:24 2013
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
+import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@@ -83,6 +84,11 @@ import org.apache.hadoop.yarn.api.record
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Logger;
+import org.apache.log4j.SimpleLayout;
+import org.apache.log4j.WriterAppender;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
@@ -112,6 +118,7 @@ public class TestYARNRunner extends Test
public void setUp() throws Exception {
resourceMgrDelegate = mock(ResourceMgrDelegate.class);
conf = new YarnConfiguration();
+ conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
clientCache = new ClientCache(conf, resourceMgrDelegate);
clientCache = spy(clientCache);
yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
@@ -188,7 +195,7 @@ public class TestYARNRunner extends Test
@Test
public void testResourceMgrDelegate() throws Exception {
- /* we not want a mock of resourcemgr deleagte */
+ /* we do not want a mock of the resource mgr delegate */
final ClientRMProtocol clientRMProtocol = mock(ClientRMProtocol.class);
ResourceMgrDelegate delegate = new ResourceMgrDelegate(conf) {
@Override
@@ -255,6 +262,9 @@ public class TestYARNRunner extends Test
@Test
public void testHistoryServerToken() throws Exception {
+ //Set the master principal in the config
+ conf.set(YarnConfiguration.RM_PRINCIPAL,"foo@LOCAL");
+
final String masterPrincipal = Master.getMasterPrincipal(conf);
final MRClientProtocol hsProxy = mock(MRClientProtocol.class);
@@ -264,7 +274,7 @@ public class TestYARNRunner extends Test
GetDelegationTokenRequest request =
(GetDelegationTokenRequest)invocation.getArguments()[0];
// check that the renewer matches the cluster's RM principal
- assertEquals(request.getRenewer(), masterPrincipal);
+ assertEquals(masterPrincipal, request.getRenewer() );
DelegationToken token =
recordFactory.newRecordInstance(DelegationToken.class);
@@ -356,4 +366,53 @@ public class TestYARNRunner extends Test
assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex);
}
}
+ @Test
+ public void testWarnCommandOpts() throws Exception {
+ Logger logger = Logger.getLogger(YARNRunner.class);
+
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ Layout layout = new SimpleLayout();
+ Appender appender = new WriterAppender(layout, bout);
+ logger.addAppender(appender);
+
+ JobConf jobConf = new JobConf();
+
+ jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
+ jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
+
+ YARNRunner yarnRunner = new YARNRunner(jobConf);
+
+ File jobxml = new File(testWorkDir, MRJobConfig.JOB_CONF_FILE);
+ OutputStream out = new FileOutputStream(jobxml);
+ conf.writeXml(out);
+ out.close();
+
+ File jobsplit = new File(testWorkDir, MRJobConfig.JOB_SPLIT);
+ out = new FileOutputStream(jobsplit);
+ out.close();
+
+ File jobsplitmetainfo = new File(testWorkDir, MRJobConfig.JOB_SPLIT_METAINFO);
+ out = new FileOutputStream(jobsplitmetainfo);
+ out.close();
+
+ File appTokens = new File(testWorkDir, MRJobConfig.APPLICATION_TOKENS_FILE);
+ out = new FileOutputStream(appTokens);
+ out.close();
+
+ @SuppressWarnings("unused")
+ ApplicationSubmissionContext submissionContext =
+ yarnRunner.createApplicationSubmissionContext(jobConf, testWorkDir.toString(), new Credentials());
+
+ String logMsg = bout.toString();
+ assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
+ "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " +
+ "longer function if hadoop native libraries are used. These values " +
+ "should be set as part of the LD_LIBRARY_PATH in the app master JVM " +
+ "env using yarn.app.mapreduce.am.admin.user.env config settings."));
+ assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
+ "yarn.app.mapreduce.am.command-opts can cause programs to no longer " +
+ "function if hadoop native libraries are used. These values should " +
+ "be set as part of the LD_LIBRARY_PATH in the app master JVM env " +
+ "using yarn.app.mapreduce.am.env config settings."));
+ }
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java Wed Jan 23 18:51:24 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.examples;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.RoundingMode;
+import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -77,8 +78,7 @@ public class QuasiMonteCarlo extends Con
static final String DESCRIPTION
= "A map/reduce program that estimates Pi using a quasi-Monte Carlo method.";
/** tmp directory for input/output */
- static private final Path TMP_DIR = new Path(
- QuasiMonteCarlo.class.getSimpleName() + "_TMP_3_141592654");
+ static private final String TMP_DIR_PREFIX = QuasiMonteCarlo.class.getSimpleName();
/** 2-dimensional Halton sequence {H(i)},
* where H(i) is a 2-dimensional point and i >= 1 is the index.
@@ -228,9 +228,9 @@ public class QuasiMonteCarlo extends Con
@Override
public void cleanup(Context context) throws IOException {
//write output to a file
- Path outDir = new Path(TMP_DIR, "out");
- Path outFile = new Path(outDir, "reduce-out");
Configuration conf = context.getConfiguration();
+ Path outDir = new Path(conf.get(FileOutputFormat.OUTDIR));
+ Path outFile = new Path(outDir, "reduce-out");
FileSystem fileSys = FileSystem.get(conf);
SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf,
outFile, LongWritable.class, LongWritable.class,
@@ -246,7 +246,7 @@ public class QuasiMonteCarlo extends Con
* @return the estimated value of Pi
*/
public static BigDecimal estimatePi(int numMaps, long numPoints,
- Configuration conf
+ Path tmpDir, Configuration conf
) throws IOException, ClassNotFoundException, InterruptedException {
Job job = new Job(conf);
//setup job conf
@@ -269,14 +269,14 @@ public class QuasiMonteCarlo extends Con
job.setSpeculativeExecution(false);
//setup input/output directories
- final Path inDir = new Path(TMP_DIR, "in");
- final Path outDir = new Path(TMP_DIR, "out");
+ final Path inDir = new Path(tmpDir, "in");
+ final Path outDir = new Path(tmpDir, "out");
FileInputFormat.setInputPaths(job, inDir);
FileOutputFormat.setOutputPath(job, outDir);
final FileSystem fs = FileSystem.get(conf);
- if (fs.exists(TMP_DIR)) {
- throw new IOException("Tmp directory " + fs.makeQualified(TMP_DIR)
+ if (fs.exists(tmpDir)) {
+ throw new IOException("Tmp directory " + fs.makeQualified(tmpDir)
+ " already exists. Please remove it first.");
}
if (!fs.mkdirs(inDir)) {
@@ -325,7 +325,7 @@ public class QuasiMonteCarlo extends Con
.multiply(BigDecimal.valueOf(numInside.get()))
.divide(numTotal, RoundingMode.HALF_UP);
} finally {
- fs.delete(TMP_DIR, true);
+ fs.delete(tmpDir, true);
}
}
@@ -344,12 +344,15 @@ public class QuasiMonteCarlo extends Con
final int nMaps = Integer.parseInt(args[0]);
final long nSamples = Long.parseLong(args[1]);
+ long now = System.currentTimeMillis();
+ int rand = new Random().nextInt(Integer.MAX_VALUE);
+ final Path tmpDir = new Path(TMP_DIR_PREFIX + "_" + now + "_" + rand);
System.out.println("Number of Maps = " + nMaps);
System.out.println("Samples per Map = " + nSamples);
System.out.println("Estimated value of Pi is "
- + estimatePi(nMaps, nSamples, getConf()));
+ + estimatePi(nMaps, nSamples, tmpDir, getConf()));
return 0;
}
Modified: hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java?rev=1437623&r1=1437622&r2=1437623&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java Wed Jan 23 18:51:24 2013
@@ -174,16 +174,16 @@ public class DistributedPentomino extend
return 2;
}
// check for passed parameters, otherwise use defaults
- int width = PENT_WIDTH;
- int height = PENT_HEIGHT;
- int depth = PENT_DEPTH;
+ int width = conf.getInt(Pentomino.WIDTH, PENT_WIDTH);
+ int height = conf.getInt(Pentomino.HEIGHT, PENT_HEIGHT);
+ int depth = conf.getInt(Pentomino.DEPTH, PENT_DEPTH);
for (int i = 0; i < args.length; i++) {
if (args[i].equalsIgnoreCase("-depth")) {
- depth = Integer.parseInt(args[i++].trim());
+ depth = Integer.parseInt(args[++i].trim());
} else if (args[i].equalsIgnoreCase("-height")) {
- height = Integer.parseInt(args[i++].trim());
+ height = Integer.parseInt(args[++i].trim());
} else if (args[i].equalsIgnoreCase("-width") ) {
- width = Integer.parseInt(args[i++].trim());
+ width = Integer.parseInt(args[++i].trim());
}
}
// now set the values within conf for M/R tasks to read, this