You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@flink.apache.org by se...@apache.org on 2014/06/22 23:47:22 UTC
[01/22] Rework the TaskManager to a slot-based model and remove
legacy cloud code
Repository: incubator-flink
Updated Branches:
refs/heads/master 7b6b5a2e0 -> b4b633eab
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingMixedOrderITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingMixedOrderITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingMixedOrderITCase.java
index 51d7a66..a9bda2b 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingMixedOrderITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingMixedOrderITCase.java
@@ -36,42 +36,44 @@ import eu.stratosphere.types.Key;
public class GlobalSortingMixedOrderITCase extends RecordAPITestBase {
private static final int NUM_RECORDS = 100000;
-
+
private static final int RANGE_I1 = 100;
private static final int RANGE_I2 = 20;
private static final int RANGE_I3 = 20;
-
+
private String recordsPath;
private String resultPath;
private String sortedRecords;
-
+ public GlobalSortingMixedOrderITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
protected void preSubmit() throws Exception {
-
+
ArrayList<TripleInt> records = new ArrayList<TripleInt>();
-
+
//Generate records
final Random rnd = new Random(1988);
final StringBuilder sb = new StringBuilder(NUM_RECORDS * 7);
-
-
+
+
for (int j = 0; j < NUM_RECORDS; j++) {
TripleInt val = new TripleInt(rnd.nextInt(RANGE_I1), rnd.nextInt(RANGE_I2), rnd.nextInt(RANGE_I3));
records.add(val);
sb.append(val);
sb.append('\n');
}
-
-
+
+
this.recordsPath = createTempFile("records", sb.toString());
this.resultPath = getTempDirPath("result");
// create the sorted result;
Collections.sort(records);
-
+
sb.setLength(0);
for (TripleInt val : records) {
sb.append(val);
@@ -83,7 +85,7 @@ public class GlobalSortingMixedOrderITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
GlobalSort globalSort = new GlobalSort();
- return globalSort.getPlan("4", recordsPath, resultPath);
+ return globalSort.getPlan(new Integer(DOP).toString(), recordsPath, resultPath);
}
@Override
@@ -91,22 +93,22 @@ public class GlobalSortingMixedOrderITCase extends RecordAPITestBase {
// Test results
compareResultsByLinesInMemoryWithStrictOrder(this.sortedRecords, this.resultPath);
}
-
-
+
+
public static class TripleIntDistribution implements DataDistribution {
-
+
private static final long serialVersionUID = 1L;
-
+
private boolean ascendingI1, ascendingI2, ascendingI3;
-
+
public TripleIntDistribution(Order orderI1, Order orderI2, Order orderI3) {
this.ascendingI1 = orderI1 != Order.DESCENDING;
this.ascendingI2 = orderI2 != Order.DESCENDING;
this.ascendingI3 = orderI3 != Order.DESCENDING;
}
-
+
public TripleIntDistribution() {}
-
+
@Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(this.ascendingI1);
@@ -129,7 +131,7 @@ public class GlobalSortingMixedOrderITCase extends RecordAPITestBase {
if (!this.ascendingI1) {
boundVal = RANGE_I1 - boundVal;
}
-
+
return new Key[] { new IntValue(boundVal), new IntValue(RANGE_I2), new IntValue(RANGE_I3) };
}
@@ -137,11 +139,11 @@ public class GlobalSortingMixedOrderITCase extends RecordAPITestBase {
public int getNumberOfFields() {
return 3;
}
-
+
}
-
+
private static class GlobalSort implements Program {
-
+
private static final long serialVersionUID = 1L;
@Override
@@ -150,10 +152,10 @@ public class GlobalSortingMixedOrderITCase extends RecordAPITestBase {
final int numSubtasks = (args.length > 0 ? Integer.parseInt(args[0]) : 1);
final String recordsPath = (args.length > 1 ? args[1] : "");
final String output = (args.length > 2 ? args[2] : "");
-
+
@SuppressWarnings("unchecked")
FileDataSource source = new FileDataSource(new CsvInputFormat(',', IntValue.class, IntValue.class, IntValue.class), recordsPath);
-
+
FileDataSink sink = new FileDataSink(CsvOutputFormat.class, output);
CsvOutputFormat.configureRecordFormat(sink)
.recordDelimiter('\n')
@@ -162,34 +164,34 @@ public class GlobalSortingMixedOrderITCase extends RecordAPITestBase {
.field(IntValue.class, 0)
.field(IntValue.class, 1)
.field(IntValue.class, 2);
-
+
sink.setGlobalOrder(
new Ordering(0, IntValue.class, Order.DESCENDING)
.appendOrdering(1, IntValue.class, Order.ASCENDING)
.appendOrdering(2, IntValue.class, Order.DESCENDING),
new TripleIntDistribution(Order.DESCENDING, Order.ASCENDING, Order.DESCENDING));
sink.setInput(source);
-
+
Plan p = new Plan(sink);
p.setDefaultParallelism(numSubtasks);
return p;
}
}
-
+
/**
* Three integers sorting descending, ascending, descending.
*/
private static final class TripleInt implements Comparable<TripleInt> {
-
+
private final int i1, i2, i3;
-
+
private TripleInt(int i1, int i2, int i3) {
this.i1 = i1;
this.i2 = i2;
this.i3 = i3;
}
-
+
@Override
public String toString() {
StringBuilder bld = new StringBuilder(32);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GroupOrderReduceITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GroupOrderReduceITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GroupOrderReduceITCase.java
index d9b244c..48479ed 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GroupOrderReduceITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GroupOrderReduceITCase.java
@@ -60,6 +60,7 @@ public class GroupOrderReduceITCase extends RecordAPITestBase {
public GroupOrderReduceITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@@ -104,7 +105,7 @@ public class GroupOrderReduceITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("GroupOrderTest#NumSubtasks", 4);
+ config.setInteger("GroupOrderTest#NumSubtasks", DOP);
return toParameterList(config);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/MergeOnlyJoinITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/MergeOnlyJoinITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/MergeOnlyJoinITCase.java
index 47a0f59..f19ceee 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/MergeOnlyJoinITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/MergeOnlyJoinITCase.java
@@ -62,6 +62,7 @@ public class MergeOnlyJoinITCase extends RecordAPITestBase {
public MergeOnlyJoinITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(4);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/PairwiseSPITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/PairwiseSPITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/PairwiseSPITCase.java
index 38168a2..52de6ab 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/PairwiseSPITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/PairwiseSPITCase.java
@@ -66,7 +66,7 @@ public class PairwiseSPITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
PairwiseSP a2aSP = new PairwiseSP();
- return a2aSP.getPlan(config.getString("All2AllSPTest#NoSubtasks", "4"),
+ return a2aSP.getPlan(config.getString("All2AllSPTest#NoSubtasks", new Integer(DOP).toString()),
rdfDataPath,
resultPath,
"true");
@@ -80,7 +80,7 @@ public class PairwiseSPITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("All2AllSPTest#NoSubtasks", 4);
+ config.setInteger("All2AllSPTest#NoSubtasks", DOP);
return toParameterList(config);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery10ITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery10ITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery10ITCase.java
index a19cad1..5450498 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery10ITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery10ITCase.java
@@ -200,7 +200,7 @@ public class TPCHQuery10ITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("TPCHQuery10Test#NoSubtasks", 4);
+ config.setInteger("TPCHQuery10Test#NoSubtasks", DOP);
return toParameterList(config);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3ITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3ITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3ITCase.java
index cc1d16a..54a5fe1 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3ITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3ITCase.java
@@ -122,6 +122,7 @@ public class TPCHQuery3ITCase extends RecordAPITestBase {
public TPCHQuery3ITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -150,7 +151,7 @@ public class TPCHQuery3ITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("dop", 4);
+ config.setInteger("dop", DOP);
return toParameterList(config);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3WithUnionITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3WithUnionITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3WithUnionITCase.java
index b0c5200..df95484 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3WithUnionITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery3WithUnionITCase.java
@@ -121,6 +121,10 @@ public class TPCHQuery3WithUnionITCase extends RecordAPITestBase {
private static final String EXPECTED_RESULT = "5|0|147828.97\n" + "66|0|99188.09\n";
+ public TPCHQuery3WithUnionITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
@@ -136,7 +140,7 @@ public class TPCHQuery3WithUnionITCase extends RecordAPITestBase {
protected Plan getTestJob() {
TPCHQuery3Unioned tpch3 = new TPCHQuery3Unioned();
return tpch3.getPlan(
- "4",
+ new Integer(DOP).toString(),
orders1Path,
orders2Path,
partJoin1Path,
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery4ITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery4ITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery4ITCase.java
index d8b9275..50097be 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery4ITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery4ITCase.java
@@ -112,6 +112,10 @@ public class TPCHQuery4ITCase extends RecordAPITestBase {
private static final String EXPECTED_RESULT = "1-URGENT|2|\n" + "3-MEDIUM|2|\n" + "4-NOT SPECIFIED|4|";
+ public TPCHQuery4ITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
ordersPath = createTempFile("orders", ORDERS);
@@ -122,7 +126,7 @@ public class TPCHQuery4ITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
TPCHQuery4 tpch4 = new TPCHQuery4();
- return tpch4.getPlan("4", ordersPath, lineitemsPath, resultPath);
+ return tpch4.getPlan(new Integer(DOP).toString(), ordersPath, lineitemsPath, resultPath);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery9ITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery9ITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery9ITCase.java
index b1e9af6..a863236 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery9ITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQuery9ITCase.java
@@ -335,6 +335,10 @@ public class TPCHQuery9ITCase extends RecordAPITestBase {
+ "IRAN|1992|37970.953\n"
+ "IRAN|1993|83140.0\n"
+ "IRAN|1996|9672.556\n";
+
+ public TPCHQuery9ITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
@@ -352,7 +356,7 @@ public class TPCHQuery9ITCase extends RecordAPITestBase {
protected Plan getTestJob() {
TPCHQuery9 tpch9 = new TPCHQuery9();
return tpch9.getPlan(
- "4",
+ new Integer(DOP).toString(),
partInputPath,
partSuppInputPath,
ordersInputPath,
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQueryAsterixITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQueryAsterixITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQueryAsterixITCase.java
index 9c2ba26..0b6220c 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQueryAsterixITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TPCHQueryAsterixITCase.java
@@ -62,6 +62,10 @@ public class TPCHQueryAsterixITCase extends RecordAPITestBase {
"2|MACHINERY\n" +
"2|FURNITURE\n";
+ public TPCHQueryAsterixITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
@@ -73,7 +77,7 @@ public class TPCHQueryAsterixITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
TPCHQueryAsterix tpchBench = new TPCHQueryAsterix();
- return tpchBench.getPlan("4", ordersPath, custPath, resultPath);
+ return tpchBench.getPlan(new Integer(DOP).toString(), ordersPath, custPath, resultPath);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TeraSortITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TeraSortITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TeraSortITCase.java
index 97db904..ab2ee7b 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TeraSortITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/TeraSortITCase.java
@@ -29,7 +29,10 @@ public class TeraSortITCase extends RecordAPITestBase {
private static final String INPUT_DATA_FILE = "/testdata/terainput.txt";
private String resultPath;
-
+
+ public TeraSortITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
protected void preSubmit() throws Exception {
@@ -41,7 +44,7 @@ public class TeraSortITCase extends RecordAPITestBase {
String testDataPath = getClass().getResource(INPUT_DATA_FILE).toString();
TeraSort ts = new TeraSort();
- return ts.getPlan("4", testDataPath, resultPath);
+ return ts.getPlan(new Integer(DOP).toString(), testDataPath, resultPath);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WebLogAnalysisITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WebLogAnalysisITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WebLogAnalysisITCase.java
index 8b60d52..a0458c7 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WebLogAnalysisITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WebLogAnalysisITCase.java
@@ -148,6 +148,10 @@ public class WebLogAnalysisITCase extends RecordAPITestBase {
private static final String expected = "87|url_24|39\n" + "59|url_28|41\n";
+ public WebLogAnalysisITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
docsPath = createTempFile("docs", docs);
@@ -159,7 +163,7 @@ public class WebLogAnalysisITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
WebLogAnalysis relOLAP = new WebLogAnalysis();
- return relOLAP.getPlan("4", docsPath, ranksPath, visitsPath, resultPath);
+ return relOLAP.getPlan(new Integer(DOP).toString(), docsPath, ranksPath, visitsPath, resultPath);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountITCase.java
index 1a2d183..1adf4f8 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountITCase.java
@@ -23,6 +23,10 @@ public class WordCountITCase extends RecordAPITestBase {
protected String textPath;
protected String resultPath;
+ public WordCountITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
@@ -33,7 +37,7 @@ public class WordCountITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
WordCount wc = new WordCount();
- return wc.getPlan("4", textPath, resultPath);
+ return wc.getPlan(new Integer(DOP).toString(), textPath, resultPath);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountUnionReduceITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountUnionReduceITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountUnionReduceITCase.java
index 30ce102..35a14c5 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountUnionReduceITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/WordCountUnionReduceITCase.java
@@ -46,6 +46,10 @@ public class WordCountUnionReduceITCase extends RecordAPITestBase {
private String outputPath;
+ public WordCountUnionReduceITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
@@ -61,7 +65,7 @@ public class WordCountUnionReduceITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
WordCountUnionReduce wc = new WordCountUnionReduce();
- return wc.getPlan(this.inputPath, this.outputPath, 4);
+ return wc.getPlan(this.inputPath, this.outputPath, DOP);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
index fae3f99..a8ab311 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
@@ -30,6 +30,7 @@ import eu.stratosphere.runtime.io.api.RecordWriter;
import eu.stratosphere.runtime.io.channels.ChannelType;
import eu.stratosphere.test.util.RecordAPITestBase;
import eu.stratosphere.util.LogUtils;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
@@ -53,9 +54,9 @@ public class NetworkStackThroughput extends RecordAPITestBase {
private static final String USE_FORWARDER_CONFIG_KEY = "use.forwarder";
- private static final String NUM_SUBTASKS_CONFIG_KEY = "num.subtasks";
+ private static final String PARALLELISM_CONFIG_KEY = "num.subtasks";
- private static final String NUM_SUBTASKS_PER_INSTANCE_CONFIG_KEY = "num.subtasks.instance";
+ private static final String NUM_SLOTS_PER_TM_CONFIG_KEY = "num.slots.per.tm";
private static final String IS_SLOW_SENDER_CONFIG_KEY = "is.slow.sender";
@@ -64,13 +65,35 @@ public class NetworkStackThroughput extends RecordAPITestBase {
private static final int IS_SLOW_SLEEP_MS = 10;
private static final int IS_SLOW_EVERY_NUM_RECORDS = (2 * 32 * 1024) / SpeedTestRecord.RECORD_SIZE;
+
+ // ------------------------------------------------------------------------
+
+ private int dataVolumeGb;
+ private boolean useForwarder;
+ private boolean isSlowSender;
+ private boolean isSlowReceiver;
+ private int parallelism;
// ------------------------------------------------------------------------
public NetworkStackThroughput(Configuration config) {
super(config);
-
- setNumTaskManager(2);
+
+ dataVolumeGb = this.config.getInteger(DATA_VOLUME_GB_CONFIG_KEY, 1);
+ useForwarder = this.config.getBoolean(USE_FORWARDER_CONFIG_KEY, true);
+ isSlowSender = this.config.getBoolean(IS_SLOW_SENDER_CONFIG_KEY, false);
+ isSlowReceiver = this.config.getBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, false);
+ parallelism = config.getInteger(PARALLELISM_CONFIG_KEY, 1);
+
+ int numSlots = config.getInteger(NUM_SLOTS_PER_TM_CONFIG_KEY, 1);
+
+ if (parallelism % numSlots != 0) {
+ throw new RuntimeException("The test case defines a parallelism that is not a multiple of the slots per task manager.");
+ }
+
+ setNumTaskTracker(parallelism / numSlots);
+ setTaskManagerNumSlots(numSlots);
+
LogUtils.initializeDefaultConsoleLogger();
}
@@ -94,8 +117,8 @@ public class NetworkStackThroughput extends RecordAPITestBase {
config.setBoolean(USE_FORWARDER_CONFIG_KEY, (Boolean) p[1]);
config.setBoolean(IS_SLOW_SENDER_CONFIG_KEY, (Boolean) p[2]);
config.setBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, (Boolean) p[3]);
- config.setInteger(NUM_SUBTASKS_CONFIG_KEY, (Integer) p[4]);
- config.setInteger(NUM_SUBTASKS_PER_INSTANCE_CONFIG_KEY, (Integer) p[5]);
+ config.setInteger(PARALLELISM_CONFIG_KEY, (Integer) p[4]);
+ config.setInteger(NUM_SLOTS_PER_TM_CONFIG_KEY, (Integer) p[5]);
configs.add(config);
}
@@ -107,14 +130,7 @@ public class NetworkStackThroughput extends RecordAPITestBase {
@Override
protected JobGraph getJobGraph() throws Exception {
- int dataVolumeGb = this.config.getInteger(DATA_VOLUME_GB_CONFIG_KEY, 1);
- boolean useForwarder = this.config.getBoolean(USE_FORWARDER_CONFIG_KEY, true);
- boolean isSlowSender = this.config.getBoolean(IS_SLOW_SENDER_CONFIG_KEY, false);
- boolean isSlowReceiver = this.config.getBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, false);
- int numSubtasks = this.config.getInteger(NUM_SUBTASKS_CONFIG_KEY, 1);
- int numSubtasksPerInstance = this.config.getInteger(NUM_SUBTASKS_PER_INSTANCE_CONFIG_KEY, 1);
-
- return createJobGraph(dataVolumeGb, useForwarder, isSlowSender, isSlowReceiver, numSubtasks, numSubtasksPerInstance);
+ return createJobGraph(dataVolumeGb, useForwarder, isSlowSender, isSlowReceiver, parallelism);
}
@After
@@ -133,14 +149,13 @@ public class NetworkStackThroughput extends RecordAPITestBase {
}
private JobGraph createJobGraph(int dataVolumeGb, boolean useForwarder, boolean isSlowSender, boolean isSlowReceiver,
- int numSubtasks, int numSubtasksPerInstance) throws JobGraphDefinitionException {
+ int numSubtasks) throws JobGraphDefinitionException {
JobGraph jobGraph = new JobGraph("Speed Test");
JobInputVertex producer = new JobGenericInputVertex("Speed Test Producer", jobGraph);
producer.setInputClass(SpeedTestProducer.class);
producer.setNumberOfSubtasks(numSubtasks);
- producer.setNumberOfSubtasksPerInstance(numSubtasksPerInstance);
producer.getConfiguration().setInteger(DATA_VOLUME_GB_CONFIG_KEY, dataVolumeGb);
producer.getConfiguration().setBoolean(IS_SLOW_SENDER_CONFIG_KEY, isSlowSender);
@@ -149,13 +164,11 @@ public class NetworkStackThroughput extends RecordAPITestBase {
forwarder = new JobTaskVertex("Speed Test Forwarder", jobGraph);
forwarder.setTaskClass(SpeedTestForwarder.class);
forwarder.setNumberOfSubtasks(numSubtasks);
- forwarder.setNumberOfSubtasksPerInstance(numSubtasksPerInstance);
}
JobOutputVertex consumer = new JobOutputVertex("Speed Test Consumer", jobGraph);
consumer.setOutputClass(SpeedTestConsumer.class);
consumer.setNumberOfSubtasks(numSubtasks);
- consumer.setNumberOfSubtasksPerInstance(numSubtasksPerInstance);
consumer.getConfiguration().setBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, isSlowReceiver);
if (useForwarder) {
[03/22] Rework the TaskManager to a slot-based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskExternalITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskExternalITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskExternalITCase.java
index 6fd366e..2d8113b 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskExternalITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskExternalITCase.java
@@ -33,6 +33,8 @@ import eu.stratosphere.types.Record;
public class CombineTaskExternalITCase extends DriverTestBase<GenericGroupReduce<Record, ?>> {
private static final long COMBINE_MEM = 3 * 1024 * 1024;
+
+ private final double combine_frac;
private final ArrayList<Record> outList = new ArrayList<Record>();
@@ -42,6 +44,8 @@ public class CombineTaskExternalITCase extends DriverTestBase<GenericGroupReduce
public CombineTaskExternalITCase() {
super(COMBINE_MEM, 0);
+
+ combine_frac = (double)COMBINE_MEM/this.getMemoryManager().getMemorySize();
}
@@ -55,7 +59,7 @@ public class CombineTaskExternalITCase extends DriverTestBase<GenericGroupReduce
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
- getTaskConfig().setMemoryDriver(COMBINE_MEM);
+ getTaskConfig().setRelativeMemoryDriver(combine_frac);
getTaskConfig().setFilehandlesDriver(2);
final GroupReduceCombineDriver<Record> testTask = new GroupReduceCombineDriver<Record>();
@@ -108,7 +112,7 @@ public class CombineTaskExternalITCase extends DriverTestBase<GenericGroupReduce
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
- getTaskConfig().setMemoryDriver(COMBINE_MEM);
+ getTaskConfig().setRelativeMemoryDriver(combine_frac);
getTaskConfig().setFilehandlesDriver(2);
final GroupReduceCombineDriver<Record> testTask = new GroupReduceCombineDriver<Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskTest.java
index 0917051..98e7003 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CombineTaskTest.java
@@ -39,6 +39,8 @@ import eu.stratosphere.util.Collector;
public class CombineTaskTest extends DriverTestBase<GenericGroupReduce<Record, ?>>
{
private static final long COMBINE_MEM = 3 * 1024 * 1024;
+
+ private final double combine_frac;
private final ArrayList<Record> outList = new ArrayList<Record>();
@@ -48,6 +50,8 @@ public class CombineTaskTest extends DriverTestBase<GenericGroupReduce<Record, ?
public CombineTaskTest() {
super(COMBINE_MEM, 0);
+
+ combine_frac = (double)COMBINE_MEM/this.getMemoryManager().getMemorySize();
}
@Test
@@ -60,7 +64,7 @@ public class CombineTaskTest extends DriverTestBase<GenericGroupReduce<Record, ?
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
- getTaskConfig().setMemoryDriver(COMBINE_MEM);
+ getTaskConfig().setRelativeMemoryDriver(combine_frac);
getTaskConfig().setFilehandlesDriver(2);
final GroupReduceCombineDriver<Record> testTask = new GroupReduceCombineDriver<Record>();
@@ -96,7 +100,7 @@ public class CombineTaskTest extends DriverTestBase<GenericGroupReduce<Record, ?
setOutput(new DiscardingOutputCollector<Record>());
getTaskConfig().setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
- getTaskConfig().setMemoryDriver(COMBINE_MEM);
+ getTaskConfig().setRelativeMemoryDriver(combine_frac);
getTaskConfig().setFilehandlesDriver(2);
final GroupReduceCombineDriver<Record> testTask = new GroupReduceCombineDriver<Record>();
@@ -120,7 +124,7 @@ public class CombineTaskTest extends DriverTestBase<GenericGroupReduce<Record, ?
setOutput(new DiscardingOutputCollector<Record>());
getTaskConfig().setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
- getTaskConfig().setMemoryDriver(COMBINE_MEM);
+ getTaskConfig().setRelativeMemoryDriver(combine_frac);
getTaskConfig().setFilehandlesDriver(2);
final GroupReduceCombineDriver<Record> testTask = new GroupReduceCombineDriver<Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskExternalITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskExternalITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskExternalITCase.java
index 19e6209..fdf1941 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskExternalITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskExternalITCase.java
@@ -27,11 +27,14 @@ import eu.stratosphere.types.Record;
public class CrossTaskExternalITCase extends DriverTestBase<GenericCrosser<Record, Record, Record>>
{
private static final long CROSS_MEM = 1024 * 1024;
+
+ private final double cross_frac;
private final CountingOutputCollector output = new CountingOutputCollector();
public CrossTaskExternalITCase() {
super(CROSS_MEM, 0);
+ cross_frac = (double)CROSS_MEM/this.getMemoryManager().getMemorySize();
}
@Test
@@ -52,7 +55,7 @@ public class CrossTaskExternalITCase extends DriverTestBase<GenericCrosser<Recor
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -84,7 +87,7 @@ public class CrossTaskExternalITCase extends DriverTestBase<GenericCrosser<Recor
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskTest.java
index 8667a6f..baa9589 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/CrossTaskTest.java
@@ -32,11 +32,15 @@ import eu.stratosphere.util.Collector;
public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record, Record>>
{
private static final long CROSS_MEM = 1024 * 1024;
+
+ private final double cross_frac;
private final CountingOutputCollector output = new CountingOutputCollector();
public CrossTaskTest() {
super(CROSS_MEM, 0);
+
+ cross_frac = (double)CROSS_MEM/this.getMemoryManager().getMemorySize();
}
@Test
@@ -56,7 +60,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -86,7 +90,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -114,7 +118,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -144,7 +148,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -175,7 +179,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -206,7 +210,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -234,7 +238,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -263,7 +267,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -294,7 +298,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -324,7 +328,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -354,7 +358,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -384,7 +388,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new UniformRecordGenerator(keyCnt2, valCnt2, false));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -411,7 +415,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new DelayingInfinitiveInputIterator(100));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -454,7 +458,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new DelayingInfinitiveInputIterator(100));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_BLOCKED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -497,7 +501,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new DelayingInfinitiveInputIterator(100));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_FIRST);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
@@ -540,7 +544,7 @@ public class CrossTaskTest extends DriverTestBase<GenericCrosser<Record, Record,
addInput(new DelayingInfinitiveInputIterator(100));
getTaskConfig().setDriverStrategy(DriverStrategy.NESTEDLOOP_STREAMED_OUTER_SECOND);
- getTaskConfig().setMemoryDriver(CROSS_MEM);
+ getTaskConfig().setRelativeMemoryDriver(cross_frac);
final CrossDriver<Record, Record, Record> testTask = new CrossDriver<Record, Record, Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/DataSinkTaskTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/DataSinkTaskTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/DataSinkTaskTest.java
index bfd0d42..67f9fe8 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/DataSinkTaskTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/DataSinkTaskTest.java
@@ -42,13 +42,13 @@ import eu.stratosphere.types.IntValue;
import eu.stratosphere.types.Key;
import eu.stratosphere.types.Record;
-public class DataSinkTaskTest extends TaskTestBase {
-
- private static final int MEMORY_MANAGER_SIZE = 1024 * 1024;
+public class DataSinkTaskTest extends TaskTestBase
+{
+ private static final Log LOG = LogFactory.getLog(DataSinkTaskTest.class);
+
+ private static final int MEMORY_MANAGER_SIZE = 3 * 1024 * 1024;
private static final int NETWORK_BUFFER_SIZE = 1024;
-
- private static final Log LOG = LogFactory.getLog(DataSinkTaskTest.class);
private final String tempTestPath = Path.constructTestPath("dst_test");
@@ -65,7 +65,7 @@ public class DataSinkTaskTest extends TaskTestBase {
int keyCnt = 100;
int valCnt = 20;
-
+
super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new UniformRecordGenerator(keyCnt, valCnt, false), 0);
@@ -131,7 +131,7 @@ public class DataSinkTaskTest extends TaskTestBase {
int keyCnt = 100;
int valCnt = 20;
-
+
super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new UniformRecordGenerator(keyCnt, valCnt, 0, 0, false), 0);
super.addInput(new UniformRecordGenerator(keyCnt, valCnt, keyCnt, 0, false), 0);
@@ -201,8 +201,9 @@ public class DataSinkTaskTest extends TaskTestBase {
int keyCnt = 100;
int valCnt = 20;
-
- super.initEnvironment(MEMORY_MANAGER_SIZE * 4, NETWORK_BUFFER_SIZE);
+ double memoryFraction = 1.0;
+
+ super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new UniformRecordGenerator(keyCnt, valCnt, true), 0);
DataSinkTask<Record> testTask = new DataSinkTask<Record>();
@@ -210,8 +211,9 @@ public class DataSinkTaskTest extends TaskTestBase {
// set sorting
super.getTaskConfig().setInputLocalStrategy(0, LocalStrategy.SORT);
super.getTaskConfig().setInputComparator(
- new RecordComparatorFactory(new int[]{1},((Class<? extends Key<?>>[])new Class[]{IntValue.class})), 0);
- super.getTaskConfig().setMemoryInput(0, 4 * 1024 * 1024);
+ new RecordComparatorFactory(new int[]{1},((Class<? extends Key<?>>[])new Class[]{IntValue.class})),
+ 0);
+ super.getTaskConfig().setRelativeMemoryInput(0, memoryFraction);
super.getTaskConfig().setFilehandlesInput(0, 8);
super.getTaskConfig().setSpillingThresholdInput(0, 0.8f);
@@ -279,7 +281,7 @@ public class DataSinkTaskTest extends TaskTestBase {
int keyCnt = 100;
int valCnt = 20;
-
+
super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new UniformRecordGenerator(keyCnt, valCnt, false), 0);
@@ -310,9 +312,10 @@ public class DataSinkTaskTest extends TaskTestBase {
public void testFailingSortingDataSinkTask() {
int keyCnt = 100;
- int valCnt = 20;
-
- super.initEnvironment(MEMORY_MANAGER_SIZE * 4, NETWORK_BUFFER_SIZE);
+ int valCnt = 20;
+ double memoryFraction = 1.0;
+
+ super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new UniformRecordGenerator(keyCnt, valCnt, true), 0);
DataSinkTask<Record> testTask = new DataSinkTask<Record>();
@@ -322,8 +325,9 @@ public class DataSinkTaskTest extends TaskTestBase {
// set sorting
super.getTaskConfig().setInputLocalStrategy(0, LocalStrategy.SORT);
super.getTaskConfig().setInputComparator(
- new RecordComparatorFactory(new int[]{1},((Class<? extends Key<?>>[])new Class[]{IntValue.class})), 0);
- super.getTaskConfig().setMemoryInput(0, 4 * 1024 * 1024);
+ new RecordComparatorFactory(new int[]{1},((Class<? extends Key<?>>[])new Class[]{IntValue.class})),
+ 0);
+ super.getTaskConfig().setRelativeMemoryInput(0, memoryFraction);
super.getTaskConfig().setFilehandlesInput(0, 8);
super.getTaskConfig().setSpillingThresholdInput(0, 0.8f);
@@ -347,7 +351,7 @@ public class DataSinkTaskTest extends TaskTestBase {
@Test
public void testCancelDataSinkTask() {
-
+
super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new InfiniteInputIterator(), 0);
@@ -389,8 +393,9 @@ public class DataSinkTaskTest extends TaskTestBase {
@Test
@SuppressWarnings("unchecked")
public void testCancelSortingDataSinkTask() {
-
- super.initEnvironment(MEMORY_MANAGER_SIZE * 4, NETWORK_BUFFER_SIZE);
+ double memoryFraction = 1.0;
+
+ super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new InfiniteInputIterator(), 0);
final DataSinkTask<Record> testTask = new DataSinkTask<Record>();
@@ -402,7 +407,7 @@ public class DataSinkTaskTest extends TaskTestBase {
super.getTaskConfig().setInputComparator(
new RecordComparatorFactory(new int[]{1},((Class<? extends Key<?>>[])new Class[]{IntValue.class})),
0);
- super.getTaskConfig().setMemoryInput(0, 4 * 1024 * 1024);
+ super.getTaskConfig().setRelativeMemoryInput(0, memoryFraction);
super.getTaskConfig().setFilehandlesInput(0, 8);
super.getTaskConfig().setSpillingThresholdInput(0, 0.8f);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskExternalITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskExternalITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskExternalITCase.java
index 7ea8ea4..dd77059 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskExternalITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskExternalITCase.java
@@ -35,7 +35,11 @@ public class MatchTaskExternalITCase extends DriverTestBase<GenericJoiner<Record
private static final long SORT_MEM = 3*1024*1024;
private static final long BNLJN_MEM = 10 * PAGE_SIZE;
-
+
+ private final double bnljn_frac;
+
+ private final double hash_frac;
+
@SuppressWarnings("unchecked")
private final RecordComparator comparator1 = new RecordComparator(
new int[]{0}, (Class<? extends Key<?>>[])new Class[]{ IntValue.class });
@@ -48,6 +52,8 @@ public class MatchTaskExternalITCase extends DriverTestBase<GenericJoiner<Record
public MatchTaskExternalITCase() {
super(HASH_MEM, 2, SORT_MEM);
+ bnljn_frac = (double)BNLJN_MEM/this.getMemoryManager().getMemorySize();
+ hash_frac = (double)HASH_MEM/this.getMemoryManager().getMemorySize();
}
@Test
@@ -65,7 +71,7 @@ public class MatchTaskExternalITCase extends DriverTestBase<GenericJoiner<Record
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -99,7 +105,7 @@ public class MatchTaskExternalITCase extends DriverTestBase<GenericJoiner<Record
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(this.output);
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_FIRST);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -130,7 +136,7 @@ public class MatchTaskExternalITCase extends DriverTestBase<GenericJoiner<Record
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(this.output);
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskTest.java
index ce5a8c5..de56c0b 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/MatchTaskTest.java
@@ -40,8 +40,14 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
private static final long HASH_MEM = 6*1024*1024;
private static final long SORT_MEM = 3*1024*1024;
+
+ private static final int NUM_SORTER = 2;
private static final long BNLJN_MEM = 10 * PAGE_SIZE;
+
+ private final double bnljn_frac;
+
+ private final double hash_frac;
@SuppressWarnings("unchecked")
private final RecordComparator comparator1 = new RecordComparator(
@@ -55,7 +61,9 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
public MatchTaskTest() {
- super(HASH_MEM, 2, SORT_MEM);
+ super(HASH_MEM, NUM_SORTER, SORT_MEM);
+ bnljn_frac = (double)BNLJN_MEM/this.getMemoryManager().getMemorySize();
+ hash_frac = (double)HASH_MEM/this.getMemoryManager().getMemorySize();
}
@@ -72,7 +80,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -106,7 +114,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -142,7 +150,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -178,7 +186,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -214,7 +222,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -250,7 +258,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -286,7 +294,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -321,7 +329,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -357,7 +365,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -386,7 +394,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -437,7 +445,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -488,7 +496,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
addInputComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
getTaskConfig().setDriverStrategy(DriverStrategy.MERGE);
- getTaskConfig().setMemoryDriver(BNLJN_MEM);
+ getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -539,7 +547,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_FIRST);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -570,7 +578,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -601,7 +609,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_FIRST);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -632,7 +640,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -663,7 +671,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(this.outList);
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_FIRST);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -694,7 +702,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(new NirvanaOutputList());
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_FIRST);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -724,7 +732,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(new NirvanaOutputList());
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -755,7 +763,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
setOutput(new NirvanaOutputList());
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_FIRST);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -799,7 +807,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(new NirvanaOutputList());
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -843,7 +851,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(new NirvanaOutputList());
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_FIRST);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
@@ -887,7 +895,7 @@ public class MatchTaskTest extends DriverTestBase<GenericJoiner<Record, Record,
getTaskConfig().setDriverPairComparator(RecordPairComparatorFactory.get());
setOutput(new NirvanaOutputList());
getTaskConfig().setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- getTaskConfig().setMemoryDriver(HASH_MEM);
+ getTaskConfig().setRelativeMemoryDriver(hash_frac);
final MatchDriver<Record, Record, Record> testTask = new MatchDriver<Record, Record, Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskExternalITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskExternalITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskExternalITCase.java
index 7b14137..bd3524d 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskExternalITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskExternalITCase.java
@@ -130,7 +130,9 @@ public class ReduceTaskExternalITCase extends DriverTestBase<GenericGroupReduce<
try {
sorter = new CombiningUnilateralSortMerger<Record>(new MockCombiningReduceStub(),
getMemoryManager(), getIOManager(), new UniformRecordGenerator(keyCnt, valCnt, false),
- getOwningNepheleTask(), RecordSerializerFactory.get(), this.comparator.duplicate(), this.perSortMem, 2, 0.8f);
+ getOwningNepheleTask(), RecordSerializerFactory.get(), this.comparator.duplicate(),
+ this.perSortFractionMem,
+ 2, 0.8f);
addInput(sorter.getIterator());
GroupReduceDriver<Record, Record> testTask = new GroupReduceDriver<Record, Record>();
@@ -174,7 +176,9 @@ public class ReduceTaskExternalITCase extends DriverTestBase<GenericGroupReduce<
try {
sorter = new CombiningUnilateralSortMerger<Record>(new MockCombiningReduceStub(),
getMemoryManager(), getIOManager(), new UniformRecordGenerator(keyCnt, valCnt, false),
- getOwningNepheleTask(), RecordSerializerFactory.get(), this.comparator.duplicate(), this.perSortMem, 2, 0.8f);
+ getOwningNepheleTask(), RecordSerializerFactory.get(), this.comparator.duplicate(),
+ this.perSortFractionMem,
+ 2, 0.8f);
addInput(sorter.getIterator());
GroupReduceDriver<Record, Record> testTask = new GroupReduceDriver<Record, Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskTest.java
index ad859f4..a968ce2 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/ReduceTaskTest.java
@@ -125,7 +125,8 @@ public class ReduceTaskTest extends DriverTestBase<GenericGroupReduce<Record, Re
try {
sorter = new CombiningUnilateralSortMerger<Record>(new MockCombiningReduceStub(),
getMemoryManager(), getIOManager(), new UniformRecordGenerator(keyCnt, valCnt, false),
- getOwningNepheleTask(), RecordSerializerFactory.get(), this.comparator.duplicate(), this.perSortMem, 4, 0.8f);
+ getOwningNepheleTask(), RecordSerializerFactory.get(), this.comparator.duplicate(), this.perSortFractionMem,
+ 4, 0.8f);
addInput(sorter.getIterator());
GroupReduceDriver<Record, Record> testTask = new GroupReduceDriver<Record, Record>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/chaining/ChainTaskTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/chaining/ChainTaskTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/chaining/ChainTaskTest.java
index dda215e..3e9a04e 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/chaining/ChainTaskTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/chaining/ChainTaskTest.java
@@ -43,11 +43,11 @@ import eu.stratosphere.util.LogUtils;
public class ChainTaskTest extends TaskTestBase {
-
+
private static final int MEMORY_MANAGER_SIZE = 1024 * 1024 * 3;
private static final int NETWORK_BUFFER_SIZE = 1024;
-
+
private final List<Record> outList = new ArrayList<Record>();
@SuppressWarnings("unchecked")
@@ -67,10 +67,13 @@ public class ChainTaskTest extends TaskTestBase {
public void testMapTask() {
final int keyCnt = 100;
final int valCnt = 20;
+
+ final double memoryFraction = 1.0;
try {
+
// environment
- super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
+ initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
addInput(new UniformRecordGenerator(keyCnt, valCnt, false), 0);
addOutput(this.outList);
@@ -89,7 +92,7 @@ public class ChainTaskTest extends TaskTestBase {
// driver
combineConfig.setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
combineConfig.setDriverComparator(compFact, 0);
- combineConfig.setMemoryDriver(3 * 1024 * 1024);
+ combineConfig.setRelativeMemoryDriver(memoryFraction);
// udf
combineConfig.setStubWrapper(new UserCodeClassWrapper<MockReduceStub>(MockReduceStub.class));
@@ -123,10 +126,14 @@ public class ChainTaskTest extends TaskTestBase {
public void testFailingMapTask() {
int keyCnt = 100;
int valCnt = 20;
+
+ final long memorySize = 1024 * 1024 * 3;
+ final int bufferSize = 1024*1024; // NOTE(review): archived patch reads "1014*1024" — almost certainly a typo for 1024*1024; confirm against the upstream commit 86d206c4
+ final double memoryFraction = 1.0;
try {
// environment
- super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
+ initEnvironment(memorySize, bufferSize);
addInput(new UniformRecordGenerator(keyCnt, valCnt, false), 0);
addOutput(this.outList);
@@ -145,7 +152,7 @@ public class ChainTaskTest extends TaskTestBase {
// driver
combineConfig.setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
combineConfig.setDriverComparator(compFact, 0);
- combineConfig.setMemoryDriver(3 * 1024 * 1024);
+ combineConfig.setRelativeMemoryDriver(memoryFraction);
// udf
combineConfig.setStubWrapper(new UserCodeClassWrapper<MockFailingCombineStub>(MockFailingCombineStub.class));
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/ReduceCombineDriverTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/ReduceCombineDriverTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/ReduceCombineDriverTest.java
index ef88cb9..4467e30 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/ReduceCombineDriverTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/ReduceCombineDriverTest.java
@@ -43,7 +43,7 @@ public class ReduceCombineDriverTest {
try {
TestTaskContext<GenericReduce<Tuple2<String, Integer>>, Tuple2<String, Integer>> context =
new TestTaskContext<GenericReduce<Tuple2<String,Integer>>, Tuple2<String,Integer>>(1024 * 1024);
- context.getTaskConfig().setMemoryDriver(512 * 1024);
+ context.getTaskConfig().setRelativeMemoryDriver(0.5);
List<Tuple2<String, Integer>> data = DriverTestData.createReduceImmutableData();
Collections.shuffle(data);
@@ -80,7 +80,7 @@ public class ReduceCombineDriverTest {
{
TestTaskContext<GenericReduce<Tuple2<String, Integer>>, Tuple2<String, Integer>> context =
new TestTaskContext<GenericReduce<Tuple2<String,Integer>>, Tuple2<String,Integer>>(1024 * 1024);
- context.getTaskConfig().setMemoryDriver(512 * 1024);
+ context.getTaskConfig().setRelativeMemoryDriver(0.5);
List<Tuple2<String, Integer>> data = DriverTestData.createReduceImmutableData();
Collections.shuffle(data);
@@ -111,7 +111,7 @@ public class ReduceCombineDriverTest {
{
TestTaskContext<GenericReduce<Tuple2<String, Integer>>, Tuple2<String, Integer>> context =
new TestTaskContext<GenericReduce<Tuple2<String,Integer>>, Tuple2<String,Integer>>(1024 * 1024);
- context.getTaskConfig().setMemoryDriver(512 * 1024);
+ context.getTaskConfig().setRelativeMemoryDriver(0.5);
List<Tuple2<String, Integer>> data = DriverTestData.createReduceImmutableData();
Collections.shuffle(data);
@@ -152,7 +152,7 @@ public class ReduceCombineDriverTest {
{
TestTaskContext<GenericReduce<Tuple2<StringValue, IntValue>>, Tuple2<StringValue, IntValue>> context =
new TestTaskContext<GenericReduce<Tuple2<StringValue, IntValue>>, Tuple2<StringValue, IntValue>>(1024 * 1024);
- context.getTaskConfig().setMemoryDriver(512 * 1024);
+ context.getTaskConfig().setRelativeMemoryDriver(0.5);
List<Tuple2<StringValue, IntValue>> data = DriverTestData.createReduceMutableData();
TupleTypeInfo<Tuple2<StringValue, IntValue>> typeInfo = (TupleTypeInfo<Tuple2<StringValue, IntValue>>) TypeExtractor.getForObject(data.get(0));
@@ -180,7 +180,7 @@ public class ReduceCombineDriverTest {
{
TestTaskContext<GenericReduce<Tuple2<StringValue, IntValue>>, Tuple2<StringValue, IntValue>> context =
new TestTaskContext<GenericReduce<Tuple2<StringValue, IntValue>>, Tuple2<StringValue, IntValue>>(1024 * 1024);
- context.getTaskConfig().setMemoryDriver(512 * 1024);
+ context.getTaskConfig().setRelativeMemoryDriver(0.5);
List<Tuple2<StringValue, IntValue>> data = DriverTestData.createReduceMutableData();
TupleTypeInfo<Tuple2<StringValue, IntValue>> typeInfo = (TupleTypeInfo<Tuple2<StringValue, IntValue>>) TypeExtractor.getForObject(data.get(0));
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/TestTaskContext.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/TestTaskContext.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/TestTaskContext.java
index 78b0709..f458ae9 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/TestTaskContext.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/drivers/TestTaskContext.java
@@ -62,7 +62,7 @@ public class TestTaskContext<S, T> implements PactTaskContext<S, T> {
public TestTaskContext() {}
public TestTaskContext(long memoryInBytes) {
- this.memoryManager = new DefaultMemoryManager(memoryInBytes, 32 * 1024);
+ this.memoryManager = new DefaultMemoryManager(memoryInBytes,1 ,32 * 1024);
}
// --------------------------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DriverTestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DriverTestBase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DriverTestBase.java
index c1e2ea8..531382e 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DriverTestBase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DriverTestBase.java
@@ -64,6 +64,8 @@ public class DriverTestBase<S extends Function> implements PactTaskContext<S, Re
private final TaskConfig taskConfig;
protected final long perSortMem;
+
+ protected final double perSortFractionMem;
private Collector<Record> output;
@@ -95,8 +97,9 @@ public class DriverTestBase<S extends Function> implements PactTaskContext<S, Re
final long totalMem = Math.max(memory, 0) + (Math.max(maxNumSorters, 0) * perSortMemory);
this.perSortMem = perSortMemory;
+ this.perSortFractionMem = (double)perSortMemory/totalMem;
this.ioManager = new IOManager();
- this.memManager = totalMem > 0 ? new DefaultMemoryManager(totalMem) : null;
+ this.memManager = totalMem > 0 ? new DefaultMemoryManager(totalMem,1) : null;
this.inputs = new ArrayList<MutableObjectIterator<Record>>();
this.comparators = new ArrayList<TypeComparator<Record>>();
@@ -115,7 +118,8 @@ public class DriverTestBase<S extends Function> implements PactTaskContext<S, Re
public void addInputSorted(MutableObjectIterator<Record> input, RecordComparator comp) throws Exception {
UnilateralSortMerger<Record> sorter = new UnilateralSortMerger<Record>(
- this.memManager, this.ioManager, input, this.owner, RecordSerializerFactory.get(), comp, this.perSortMem, 32, 0.8f);
+ this.memManager, this.ioManager, input, this.owner, RecordSerializerFactory.get(), comp,
+ this.perSortFractionMem, 32, 0.8f);
this.sorters.add(sorter);
this.inputs.add(null);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/MockEnvironment.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/MockEnvironment.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/MockEnvironment.java
index 2585a74..ab1d4e4 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/MockEnvironment.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/MockEnvironment.java
@@ -74,7 +74,7 @@ public class MockEnvironment implements Environment, BufferProvider, LocalBuffer
this.inputs = new LinkedList<InputGate<Record>>();
this.outputs = new LinkedList<OutputGate>();
- this.memManager = new DefaultMemoryManager(memorySize);
+ this.memManager = new DefaultMemoryManager(memorySize, 1);
this.ioManager = new IOManager(System.getProperty("java.io.tmpdir"));
this.inputSplitProvider = inputSplitProvider;
this.mockBuffer = new Buffer(new MemorySegment(new byte[bufferSize]), bufferSize, null);
@@ -309,14 +309,13 @@ public class MockEnvironment implements Environment, BufferProvider, LocalBuffer
}
@Override
- public OutputGate createAndRegisterOutputGate()
- {
+ public OutputGate createAndRegisterOutputGate() {
return this.outputs.remove(0);
}
+ @SuppressWarnings("unchecked")
@Override
- public <T extends IOReadableWritable> InputGate<T> createAndRegisterInputGate()
- {
+ public <T extends IOReadableWritable> InputGate<T> createAndRegisterInputGate() {
return (InputGate<T>) this.inputs.remove(0);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
index f695979..1ee9293 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
@@ -354,7 +354,7 @@ public class InboundEnvelopeDecoderTest {
buf.readerIndex(0);
ByteBuf[] slices = randomSlices(buf);
- ch.writeInbound(slices);
+ ch.writeInbound((Object) slices);
for (ByteBuf slice : slices) {
Assert.assertEquals(1, slice.refCnt());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-test-utils/src/main/java/eu/stratosphere/test/compiler/util/CompilerTestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/compiler/util/CompilerTestBase.java b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/compiler/util/CompilerTestBase.java
index 7c37bec..8d07163 100644
--- a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/compiler/util/CompilerTestBase.java
+++ b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/compiler/util/CompilerTestBase.java
@@ -12,7 +12,6 @@
**********************************************************************************************************************/
package eu.stratosphere.test.compiler.util;
-import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
@@ -32,12 +31,6 @@ import eu.stratosphere.compiler.costs.DefaultCostEstimator;
import eu.stratosphere.compiler.plan.OptimizedPlan;
import eu.stratosphere.compiler.plan.PlanNode;
import eu.stratosphere.compiler.plan.SingleInputPlanNode;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.HardwareDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.instance.InstanceTypeDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
import eu.stratosphere.util.OperatingSystem;
/**
@@ -63,37 +56,28 @@ public abstract class CompilerTestBase {
protected PactCompiler noStatsCompiler;
- protected InstanceTypeDescription instanceType;
-
private int statCounter;
// ------------------------------------------------------------------------
@Before
- public void setup() {
- InetSocketAddress dummyAddr = new InetSocketAddress("localhost", 12345);
-
+ public void setup() {
this.dataStats = new DataStatistics();
- this.withStatsCompiler = new PactCompiler(this.dataStats, new DefaultCostEstimator(), dummyAddr);
+ this.withStatsCompiler = new PactCompiler(this.dataStats, new DefaultCostEstimator());
this.withStatsCompiler.setDefaultDegreeOfParallelism(DEFAULT_PARALLELISM);
- this.noStatsCompiler = new PactCompiler(null, new DefaultCostEstimator(), dummyAddr);
+ this.noStatsCompiler = new PactCompiler(null, new DefaultCostEstimator());
this.noStatsCompiler.setDefaultDegreeOfParallelism(DEFAULT_PARALLELISM);
-
- // create the instance type description
- InstanceType iType = InstanceTypeFactory.construct("standard", 6, 2, 4096, 100, 0);
- HardwareDescription hDesc = HardwareDescriptionFactory.construct(2, 4096 * 1024 * 1024, 2000 * 1024 * 1024);
- this.instanceType = InstanceTypeDescriptionFactory.construct(iType, hDesc, DEFAULT_PARALLELISM * 2);
}
// ------------------------------------------------------------------------
public OptimizedPlan compileWithStats(Plan p) {
- return this.withStatsCompiler.compile(p, this.instanceType);
+ return this.withStatsCompiler.compile(p);
}
public OptimizedPlan compileNoStats(Plan p) {
- return this.noStatsCompiler.compile(p, this.instanceType);
+ return this.noStatsCompiler.compile(p);
}
public void setSourceStatistics(GenericDataSourceBase<?, ?> source, long size, float recordWidth) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/AbstractTestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/AbstractTestBase.java b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/AbstractTestBase.java
index 28a2417..2873d86 100644
--- a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/AbstractTestBase.java
+++ b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/AbstractTestBase.java
@@ -45,26 +45,29 @@ import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.util.LogUtils;
public abstract class AbstractTestBase {
- private static final int DEFAULT_NUM_TASK_MANAGER = 1;
+ protected static final int MINIMUM_HEAP_SIZE_MB = 192;
- private static final int MINIMUM_HEAP_SIZE_MB = 192;
-
- private static final long MEMORY_SIZE = 80;
+ protected static final long TASK_MANAGER_MEMORY_SIZE = 80;
+
+ protected static final int DEFAULT_TASK_MANAGER_NUM_SLOTS = 1;
+
+ protected static final int DEFAULT_NUM_TASK_TRACKER = 1;
- private int numTaskManager = DEFAULT_NUM_TASK_MANAGER;
-
protected final Configuration config;
protected NepheleMiniCluster executor;
private final List<File> tempFiles;
-
-
+
+ protected int taskManagerNumSlots = DEFAULT_TASK_MANAGER_NUM_SLOTS;
+
+ protected int numTaskTracker = DEFAULT_NUM_TASK_TRACKER;
+
public AbstractTestBase(Configuration config) {
verifyJvmOptions();
this.config = config;
this.tempFiles = new ArrayList<File>();
-
+
LogUtils.initializeDefaultConsoleLogger(Level.WARN);
}
@@ -73,15 +76,6 @@ public abstract class AbstractTestBase {
Assert.assertTrue("Insufficient java heap space " + heap + "mb - set JVM option: -Xmx" + MINIMUM_HEAP_SIZE_MB
+ "m", heap > MINIMUM_HEAP_SIZE_MB - 50);
}
-
- // --------------------------------------------------------------------------------------------
- // Getter/Setter
- // --------------------------------------------------------------------------------------------
-
- public int getNumTaskManager() { return numTaskManager; }
-
- public void setNumTaskManager(int numTaskManager) { this.numTaskManager = numTaskManager; }
-
// --------------------------------------------------------------------------------------------
// Local Test Cluster Life Cycle
// --------------------------------------------------------------------------------------------
@@ -91,8 +85,9 @@ public abstract class AbstractTestBase {
this.executor = new NepheleMiniCluster();
this.executor.setDefaultOverwriteFiles(true);
this.executor.setLazyMemoryAllocation(true);
- this.executor.setMemorySize(MEMORY_SIZE);
- this.executor.setNumTaskManager(this.numTaskManager);
+ this.executor.setMemorySize(TASK_MANAGER_MEMORY_SIZE);
+ this.executor.setTaskManagerNumSlots(taskManagerNumSlots);
+ this.executor.setNumTaskTracker(this.numTaskTracker);
this.executor.start();
}
@@ -109,6 +104,19 @@ public abstract class AbstractTestBase {
deleteAllTempFiles();
}
}
+
+ //------------------
+ // Accessors
+ //------------------
+
+ public int getTaskManagerNumSlots() { return taskManagerNumSlots; }
+
+ public void setTaskManagerNumSlots(int taskManagerNumSlots) { this.taskManagerNumSlots = taskManagerNumSlots; }
+
+ public int getNumTaskTracker() { return numTaskTracker; }
+
+ public void setNumTaskTracker(int numTaskTracker) { this.numTaskTracker = numTaskTracker; }
+
// --------------------------------------------------------------------------------------------
// Temporary File Utilities
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/JavaProgramTestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/JavaProgramTestBase.java b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/JavaProgramTestBase.java
index dc83a56..2aa000a 100644
--- a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/JavaProgramTestBase.java
+++ b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/JavaProgramTestBase.java
@@ -45,11 +45,13 @@ public abstract class JavaProgramTestBase extends AbstractTestBase {
public JavaProgramTestBase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(degreeOfParallelism);
}
public void setDegreeOfParallelism(int degreeOfParallelism) {
this.degreeOfParallelism = degreeOfParallelism;
+ setTaskManagerNumSlots(degreeOfParallelism);
}
public JobExecutionResult getLatestExecutionResult() {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/RecordAPITestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/RecordAPITestBase.java b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/RecordAPITestBase.java
index 45be660..23e7c2b 100644
--- a/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/RecordAPITestBase.java
+++ b/stratosphere-test-utils/src/main/java/eu/stratosphere/test/util/RecordAPITestBase.java
@@ -28,6 +28,8 @@ import eu.stratosphere.nephele.client.JobClient;
import eu.stratosphere.nephele.jobgraph.JobGraph;
public abstract class RecordAPITestBase extends AbstractTestBase {
+
+ protected static final int DOP = 4;
protected JobExecutionResult jobExecutionResult;
@@ -40,6 +42,7 @@ public abstract class RecordAPITestBase extends AbstractTestBase {
public RecordAPITestBase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/accumulators/AccumulatorITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/accumulators/AccumulatorITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/accumulators/AccumulatorITCase.java
index a14ee03..18bc3e9 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/accumulators/AccumulatorITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/accumulators/AccumulatorITCase.java
@@ -71,13 +71,14 @@ public class AccumulatorITCase extends RecordAPITestBase {
private static final String INPUT = "one\n" + "two two\n" + "three three three\n";
private static final String EXPECTED = "one 1\ntwo 2\nthree 3\n";
- private static final int NUM_SUBTASKS = 2;
+ private static final int DOP = 2;
protected String dataPath;
protected String resultPath;
public AccumulatorITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -97,7 +98,7 @@ public class AccumulatorITCase extends RecordAPITestBase {
Assert.assertEquals(new Integer(3), (Integer) res.getAccumulatorResult("num-lines"));
- Assert.assertEquals(new Double(NUM_SUBTASKS), (Double)res.getAccumulatorResult("open-close-counter"));
+ Assert.assertEquals(new Double(DOP), (Double)res.getAccumulatorResult("open-close-counter"));
// Test histogram (words per line distribution)
Map<Integer, Integer> dist = Maps.newHashMap();
@@ -121,7 +122,7 @@ public class AccumulatorITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config1 = new Configuration();
- config1.setInteger("IterationAllReducer#NoSubtasks", NUM_SUBTASKS);
+ config1.setInteger("IterationAllReducer#NoSubtasks", DOP);
return toParameterList(config1);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/BroadcastVarsNepheleITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/BroadcastVarsNepheleITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/BroadcastVarsNepheleITCase.java
index b80810b..75c50fc 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/BroadcastVarsNepheleITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/BroadcastVarsNepheleITCase.java
@@ -62,13 +62,17 @@ public class BroadcastVarsNepheleITCase extends RecordAPITestBase {
private static final int NUM_FEATURES = 3;
+ private static final int DOP = 4;
+
protected String pointsPath;
protected String modelsPath;
protected String resultPath;
-
+ public BroadcastVarsNepheleITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
public static final String getInputPoints(int numPoints, int numDimensions, long seed) {
@@ -122,7 +126,7 @@ public class BroadcastVarsNepheleITCase extends RecordAPITestBase {
@Override
protected JobGraph getJobGraph() throws Exception {
- return createJobGraphV1(this.pointsPath, this.modelsPath, this.resultPath, 4);
+ return createJobGraphV1(this.pointsPath, this.modelsPath, this.resultPath, DOP);
}
@Override
@@ -222,7 +226,7 @@ public class BroadcastVarsNepheleITCase extends RecordAPITestBase {
@SuppressWarnings("unchecked")
private static JobInputVertex createPointsInput(JobGraph jobGraph, String pointsPath, int numSubTasks, TypeSerializerFactory<?> serializer) {
CsvInputFormat pointsInFormat = new CsvInputFormat(' ', LongValue.class, LongValue.class, LongValue.class, LongValue.class);
- JobInputVertex pointsInput = JobGraphUtils.createInput(pointsInFormat, pointsPath, "Input[Points]", jobGraph, numSubTasks, numSubTasks);
+ JobInputVertex pointsInput = JobGraphUtils.createInput(pointsInFormat, pointsPath, "Input[Points]", jobGraph, numSubTasks);
{
TaskConfig taskConfig = new TaskConfig(pointsInput.getConfiguration());
@@ -236,7 +240,7 @@ public class BroadcastVarsNepheleITCase extends RecordAPITestBase {
@SuppressWarnings("unchecked")
private static JobInputVertex createModelsInput(JobGraph jobGraph, String pointsPath, int numSubTasks, TypeSerializerFactory<?> serializer) {
CsvInputFormat modelsInFormat = new CsvInputFormat(' ', LongValue.class, LongValue.class, LongValue.class, LongValue.class);
- JobInputVertex modelsInput = JobGraphUtils.createInput(modelsInFormat, pointsPath, "Input[Models]", jobGraph, numSubTasks, numSubTasks);
+ JobInputVertex modelsInput = JobGraphUtils.createInput(modelsInFormat, pointsPath, "Input[Models]", jobGraph, numSubTasks);
{
TaskConfig taskConfig = new TaskConfig(modelsInput.getConfiguration());
@@ -248,7 +252,7 @@ public class BroadcastVarsNepheleITCase extends RecordAPITestBase {
}
private static JobTaskVertex createMapper(JobGraph jobGraph, int numSubTasks, TypeSerializerFactory<?> serializer) {
- JobTaskVertex pointsInput = JobGraphUtils.createTask(RegularPactTask.class, "Map[DotProducts]", jobGraph, numSubTasks, numSubTasks);
+ JobTaskVertex pointsInput = JobGraphUtils.createTask(RegularPactTask.class, "Map[DotProducts]", jobGraph, numSubTasks);
{
TaskConfig taskConfig = new TaskConfig(pointsInput.getConfiguration());
@@ -272,7 +276,7 @@ public class BroadcastVarsNepheleITCase extends RecordAPITestBase {
}
private static JobOutputVertex createOutput(JobGraph jobGraph, String resultPath, int numSubTasks, TypeSerializerFactory<?> serializer) {
- JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Output", numSubTasks, numSubTasks);
+ JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Output", numSubTasks);
{
TaskConfig taskConfig = new TaskConfig(output.getConfiguration());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/KMeansIterativeNepheleITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/KMeansIterativeNepheleITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/KMeansIterativeNepheleITCase.java
index 8cda32f..5e86af5 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/KMeansIterativeNepheleITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/broadcastvars/KMeansIterativeNepheleITCase.java
@@ -59,7 +59,11 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
private static final int ITERATION_ID = 42;
private static final int MEMORY_PER_CONSUMER = 2;
-
+
+ private static final int DOP = 4;
+
+ private static final double MEMORY_FRACTION_PER_CONSUMER = (double)MEMORY_PER_CONSUMER/TASK_MANAGER_MEMORY_SIZE*DOP;
+
protected String dataPath;
protected String clusterPath;
protected String resultPath;
@@ -67,6 +71,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
public KMeansIterativeNepheleITCase() {
LogUtils.initializeDefaultConsoleLogger(Level.ERROR);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -83,7 +88,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
@Override
protected JobGraph getJobGraph() throws Exception {
- return createJobGraph(dataPath, clusterPath, this.resultPath, 4, 20);
+ return createJobGraph(dataPath, clusterPath, this.resultPath, DOP, 20);
}
// -------------------------------------------------------------------------------------------------------------
@@ -93,7 +98,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
private static JobInputVertex createPointsInput(JobGraph jobGraph, String pointsPath, int numSubTasks, TypeSerializerFactory<?> serializer) {
@SuppressWarnings("unchecked")
CsvInputFormat pointsInFormat = new CsvInputFormat('|', IntValue.class, DoubleValue.class, DoubleValue.class, DoubleValue.class);
- JobInputVertex pointsInput = JobGraphUtils.createInput(pointsInFormat, pointsPath, "[Points]", jobGraph, numSubTasks, numSubTasks);
+ JobInputVertex pointsInput = JobGraphUtils.createInput(pointsInFormat, pointsPath, "[Points]", jobGraph, numSubTasks);
{
TaskConfig taskConfig = new TaskConfig(pointsInput.getConfiguration());
taskConfig.addOutputShipStrategy(ShipStrategyType.FORWARD);
@@ -114,7 +119,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
private static JobInputVertex createCentersInput(JobGraph jobGraph, String centersPath, int numSubTasks, TypeSerializerFactory<?> serializer) {
@SuppressWarnings("unchecked")
CsvInputFormat modelsInFormat = new CsvInputFormat('|', IntValue.class, DoubleValue.class, DoubleValue.class, DoubleValue.class);
- JobInputVertex modelsInput = JobGraphUtils.createInput(modelsInFormat, centersPath, "[Models]", jobGraph, numSubTasks, numSubTasks);
+ JobInputVertex modelsInput = JobGraphUtils.createInput(modelsInFormat, centersPath, "[Models]", jobGraph, numSubTasks);
{
TaskConfig taskConfig = new TaskConfig(modelsInput.getConfiguration());
@@ -135,7 +140,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
private static JobOutputVertex createOutput(JobGraph jobGraph, String resultPath, int numSubTasks, TypeSerializerFactory<?> serializer) {
- JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Output", numSubTasks, numSubTasks);
+ JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Output", numSubTasks);
{
TaskConfig taskConfig = new TaskConfig(output.getConfiguration());
@@ -152,7 +157,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
}
private static JobTaskVertex createIterationHead(JobGraph jobGraph, int numSubTasks, TypeSerializerFactory<?> serializer) {
- JobTaskVertex head = JobGraphUtils.createTask(IterationHeadPactTask.class, "Iteration Head", jobGraph, numSubTasks, numSubTasks);
+ JobTaskVertex head = JobGraphUtils.createTask(IterationHeadPactTask.class, "Iteration Head", jobGraph, numSubTasks);
TaskConfig headConfig = new TaskConfig(head.getConfiguration());
headConfig.setIterationId(ITERATION_ID);
@@ -163,7 +168,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
headConfig.setInputSerializer(serializer, 0);
// back channel / iterations
- headConfig.setBackChannelMemory(MEMORY_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeBackChannelMemory(MEMORY_FRACTION_PER_CONSUMER);
// output into iteration. broadcasting the centers
headConfig.setOutputSerializer(serializer);
@@ -190,7 +195,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
TypeComparatorFactory<?> outputComparator)
{
JobTaskVertex mapper = JobGraphUtils.createTask(IterationIntermediatePactTask.class,
- "Map (Select nearest center)", jobGraph, numSubTasks, numSubTasks);
+ "Map (Select nearest center)", jobGraph, numSubTasks);
TaskConfig intermediateConfig = new TaskConfig(mapper.getConfiguration());
intermediateConfig.setIterationId(ITERATION_ID);
@@ -220,7 +225,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
// ---------------- the tail (co group) --------------------
JobTaskVertex tail = JobGraphUtils.createTask(IterationTailPactTask.class, "Reduce / Iteration Tail", jobGraph,
- numSubTasks, numSubTasks);
+ numSubTasks);
TaskConfig tailConfig = new TaskConfig(tail.getConfiguration());
tailConfig.setIterationId(ITERATION_ID);
@@ -235,7 +240,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
tailConfig.setInputLocalStrategy(0, LocalStrategy.SORT);
tailConfig.setInputComparator(inputComparator, 0);
- tailConfig.setMemoryInput(0, MEMORY_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ tailConfig.setRelativeMemoryInput(0, MEMORY_FRACTION_PER_CONSUMER);
tailConfig.setFilehandlesInput(0, 128);
tailConfig.setSpillingThresholdInput(0, 0.9f);
@@ -279,7 +284,7 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
JobTaskVertex reducer = createReducer(jobGraph, numSubTasks, serializer, int0Comparator, serializer);
- JobOutputVertex fakeTailOutput = JobGraphUtils.createFakeOutput(jobGraph, "FakeTailOutput", numSubTasks, numSubTasks);
+ JobOutputVertex fakeTailOutput = JobGraphUtils.createFakeOutput(jobGraph, "FakeTailOutput", numSubTasks);
JobOutputVertex sync = createSync(jobGraph, numIterations, numSubTasks);
@@ -293,7 +298,8 @@ public class KMeansIterativeNepheleITCase extends RecordAPITestBase {
JobGraphUtils.connect(head, mapper, ChannelType.NETWORK, DistributionPattern.BIPARTITE);
new TaskConfig(mapper.getConfiguration()).setBroadcastGateIterativeWithNumberOfEventsUntilInterrupt(0, numSubTasks);
new TaskConfig(mapper.getConfiguration()).setInputCached(0, true);
- new TaskConfig(mapper.getConfiguration()).setInputMaterializationMemory(0, MEMORY_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ new TaskConfig(mapper.getConfiguration()).setRelativeInputMaterializationMemory(0,
+ MEMORY_FRACTION_PER_CONSUMER);
JobGraphUtils.connect(mapper, reducer, ChannelType.NETWORK, DistributionPattern.BIPARTITE);
new TaskConfig(reducer.getConfiguration()).setGateIterativeWithNumberOfEventsUntilInterrupt(0, numSubTasks);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/CancellingTestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/CancellingTestBase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/CancellingTestBase.java
index 8ce656e..1fc289d 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/CancellingTestBase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/CancellingTestBase.java
@@ -60,9 +60,13 @@ public abstract class CancellingTestBase {
*/
private static final int DEFAULT_CANCEL_FINISHED_INTERVAL = 10 * 1000;
+ private static final int DEFAULT_TASK_MANAGER_NUM_SLOTS = 1;
+
// --------------------------------------------------------------------------------------------
protected NepheleMiniCluster executor;
+
+ protected int taskManagerNumSlots = DEFAULT_TASK_MANAGER_NUM_SLOTS;
// --------------------------------------------------------------------------------------------
@@ -83,7 +87,7 @@ public abstract class CancellingTestBase {
verifyJvmOptions();
this.executor = new NepheleMiniCluster();
this.executor.setDefaultOverwriteFiles(true);
-
+ this.executor.setTaskManagerNumSlots(taskManagerNumSlots);
this.executor.start();
}
@@ -231,4 +235,8 @@ public abstract class CancellingTestBase {
final NepheleJobGraphGenerator jgg = new NepheleJobGraphGenerator();
return jgg.compileJobGraph(op);
}
+
+ public void setTaskManagerNumSlots(int taskManagerNumSlots) { this.taskManagerNumSlots = taskManagerNumSlots; }
+
+ public int getTaskManagerNumSlots() { return this.taskManagerNumSlots; }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MapCancelingITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MapCancelingITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MapCancelingITCase.java
index 1aeb229..7d48ae8 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MapCancelingITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MapCancelingITCase.java
@@ -27,6 +27,11 @@ import eu.stratosphere.types.Record;
import eu.stratosphere.util.Collector;
public class MapCancelingITCase extends CancellingTestBase {
+ private static final int DOP = 4;
+
+ public MapCancelingITCase() {
+ setTaskManagerNumSlots(DOP);
+ }
// @Test
public void testMapCancelling() throws Exception {
@@ -40,7 +45,7 @@ public class MapCancelingITCase extends CancellingTestBase {
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 5 * 1000, 10 * 1000);
}
@@ -57,7 +62,7 @@ public class MapCancelingITCase extends CancellingTestBase {
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 5 * 1000, 10 * 1000);
}
@@ -74,7 +79,7 @@ public class MapCancelingITCase extends CancellingTestBase {
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 10 * 1000, 10 * 1000);
}
@@ -91,7 +96,7 @@ public class MapCancelingITCase extends CancellingTestBase {
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 10 * 1000, 10 * 1000);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MatchJoinCancelingITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MatchJoinCancelingITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MatchJoinCancelingITCase.java
index 09413f0..82e2ace 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MatchJoinCancelingITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/cancelling/MatchJoinCancelingITCase.java
@@ -30,6 +30,11 @@ import eu.stratosphere.types.Record;
import eu.stratosphere.util.Collector;
public class MatchJoinCancelingITCase extends CancellingTestBase {
+ private static final int DOP = 4;
+
+ public MatchJoinCancelingITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
// --------------- Test Sort Matches that are canceled while still reading / sorting -----------------
// @Test
@@ -48,7 +53,7 @@ public class MatchJoinCancelingITCase extends CancellingTestBase {
GenericDataSink sink = new GenericDataSink(new DiscardingOutputFormat(), matcher, "Sink");
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 3000, 10*1000);
}
@@ -69,7 +74,7 @@ public class MatchJoinCancelingITCase extends CancellingTestBase {
GenericDataSink sink = new GenericDataSink(new DiscardingOutputFormat(), matcher, "Sink");
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 5000, 10*1000);
}
@@ -90,7 +95,7 @@ public class MatchJoinCancelingITCase extends CancellingTestBase {
GenericDataSink sink = new GenericDataSink(new DiscardingOutputFormat(), matcher, "Sink");
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 5000);
@@ -117,7 +122,7 @@ public class MatchJoinCancelingITCase extends CancellingTestBase {
GenericDataSink sink = new GenericDataSink(new DiscardingOutputFormat(), matcher, "Sink");
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 30 * 1000, 30 * 1000);
}
@@ -145,7 +150,7 @@ public class MatchJoinCancelingITCase extends CancellingTestBase {
GenericDataSink sink = new GenericDataSink(new DiscardingOutputFormat(), matcher, "Sink");
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 10 * 1000, 20 * 1000);
}
@@ -171,7 +176,7 @@ public class MatchJoinCancelingITCase extends CancellingTestBase {
GenericDataSink sink = new GenericDataSink(new DiscardingOutputFormat(), matcher, "Sink");
Plan p = new Plan(sink);
- p.setDefaultParallelism(4);
+ p.setDefaultParallelism(DOP);
runAndCancelJob(p, 10 * 1000, 10 * 1000);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/clients/examples/LocalExecutorITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/clients/examples/LocalExecutorITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/clients/examples/LocalExecutorITCase.java
index 984ecc2..b198d99 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/clients/examples/LocalExecutorITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/clients/examples/LocalExecutorITCase.java
@@ -23,8 +23,11 @@ import eu.stratosphere.client.LocalExecutor;
import eu.stratosphere.test.recordJobs.wordcount.WordCount;
import eu.stratosphere.test.testdata.WordCountData;
+
public class LocalExecutorITCase {
+ private static final int DOP = 4;
+
@Test
public void testLocalExecutorWithWordCount() {
try {
@@ -40,14 +43,15 @@ public class LocalExecutorITCase {
// run WordCount
WordCount wc = new WordCount();
- wc.getPlan("4", inFile.toURI().toString(), outFile.toURI().toString());
-
+
LocalExecutor executor = new LocalExecutor();
LocalExecutor.setLoggingLevel(Level.WARN);
executor.setDefaultOverwriteFiles(true);
+ executor.setTaskManagerNumSlots(DOP);
executor.start();
- executor.executePlan(wc.getPlan("4", inFile.toURI().toString(), outFile.toURI().toString()));
+ executor.executePlan(wc.getPlan(new Integer(DOP).toString(), inFile.toURI().toString(),
+ outFile.toURI().toString()));
executor.stop();
} catch (Exception e) {
e.printStackTrace();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/WordCountITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/WordCountITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/WordCountITCase.java
index 4a60836..272bce6 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/WordCountITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/WordCountITCase.java
@@ -25,7 +25,9 @@ public class WordCountITCase extends JavaProgramTestBase {
protected String resultPath;
public WordCountITCase(){
- setNumTaskManager(2);
+ setDegreeOfParallelism(4);
+ setNumTaskTracker(2);
+ setTaskManagerNumSlots(2);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ComputeEdgeDegreesITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ComputeEdgeDegreesITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ComputeEdgeDegreesITCase.java
index d2caeb7..831a9ae 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ComputeEdgeDegreesITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ComputeEdgeDegreesITCase.java
@@ -27,7 +27,7 @@ public class ComputeEdgeDegreesITCase extends eu.stratosphere.test.recordJobTest
protected Plan getTestJob() {
ComputeEdgeDegrees computeDegrees = new ComputeEdgeDegrees();
return computeDegrees.getScalaPlan(
- config.getInteger("ComputeEdgeDegreesTest#NumSubtasks", 4),
+ config.getInteger("ComputeEdgeDegreesTest#NumSubtasks", DOP),
edgesPath, resultPath);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ConnectedComponentsITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ConnectedComponentsITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ConnectedComponentsITCase.java
index 6725bde..40f95af 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ConnectedComponentsITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/ConnectedComponentsITCase.java
@@ -22,7 +22,7 @@ public class ConnectedComponentsITCase extends eu.stratosphere.test.iterative.Co
protected Plan getTestJob() {
ConnectedComponents cc = new ConnectedComponents();
Plan plan = cc.getScalaPlan(verticesPath, edgesPath, resultPath, 100);
- plan.setDefaultParallelism(4);
+ plan.setDefaultParallelism(DOP);
return plan;
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/EnumTrianglesOnEdgesWithDegreesITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/EnumTrianglesOnEdgesWithDegreesITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/EnumTrianglesOnEdgesWithDegreesITCase.java
index 5801d59..81b5c2a 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/EnumTrianglesOnEdgesWithDegreesITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/EnumTrianglesOnEdgesWithDegreesITCase.java
@@ -27,7 +27,7 @@ public class EnumTrianglesOnEdgesWithDegreesITCase extends eu.stratosphere.test.
protected Plan getTestJob() {
EnumTrianglesOnEdgesWithDegrees enumTriangles = new EnumTrianglesOnEdgesWithDegrees();
return enumTriangles.getScalaPlan(
- config.getInteger("EnumTrianglesTest#NumSubtasks", 4),
+ config.getInteger("EnumTrianglesTest#NumSubtasks", DOP),
edgesPath, resultPath);
}
}
[22/22] git commit: Add some options for slot-based scheduling and
change default parallelism to one.
Posted by se...@apache.org.
Add some options for slot-based scheduling and change default parallelism to one.
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/b4b633ea
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/b4b633ea
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/b4b633ea
Branch: refs/heads/master
Commit: b4b633eab9a70e14d2e0dd5252f4b092a3689093
Parents: 8c1d82a
Author: Stephan Ewen <se...@apache.org>
Authored: Sun Jun 22 18:19:57 2014 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:09:13 2014 +0200
----------------------------------------------------------------------
.../configuration/ConfigConstants.java | 5 --
.../memory/OutputViewDataOutputWrapper.java | 2 +-
.../util/InstantiationUtilsTest.java | 4 +-
.../conf/stratosphere-conf.yaml | 2 +
.../api/java/io/CollectionInputFormat.java | 15 ++--
.../api/java/io/CollectionInputFormatTest.java | 79 ++++++++++++++++++++
.../nephele/executiongraph/ExecutionGraph.java | 4 +-
.../nephele/jobmanager/JobManager.java | 4 +-
.../nephele/taskmanager/TaskManager.java | 19 +++--
.../executiongraph/ExecutionGraphTest.java | 4 +-
.../nephele/jobmanager/JobManagerITCase.java | 2 +-
11 files changed, 107 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java b/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
index eff48cc..b4699b3 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
@@ -29,11 +29,6 @@ public final class ConfigConstants {
* The config parameter defining the default degree of parallelism for jobs.
*/
public static final String DEFAULT_PARALLELIZATION_DEGREE_KEY = "parallelization.degree.default";
-
- /**
- * The config parameter defining the maximal intra-node parallelism for jobs.
- */
- public static final String PARALLELIZATION_MAX_INTRA_NODE_DEGREE_KEY = "parallelization.intra-node.default";
// -------------------------------- Runtime -------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-core/src/main/java/eu/stratosphere/core/memory/OutputViewDataOutputWrapper.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/core/memory/OutputViewDataOutputWrapper.java b/stratosphere-core/src/main/java/eu/stratosphere/core/memory/OutputViewDataOutputWrapper.java
index 7bb8f8c..cb636ce 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/core/memory/OutputViewDataOutputWrapper.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/core/memory/OutputViewDataOutputWrapper.java
@@ -110,7 +110,7 @@ public class OutputViewDataOutputWrapper implements DataOutputView {
@Override
public void write(DataInputView source, int numBytes) throws IOException {
for (int i = 0; i < numBytes; i++) {
- this.delegate.writeByte(source.readByte());
+ this.delegate.writeByte(source.readUnsignedByte());
}
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-core/src/test/java/eu/stratosphere/util/InstantiationUtilsTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/test/java/eu/stratosphere/util/InstantiationUtilsTest.java b/stratosphere-core/src/test/java/eu/stratosphere/util/InstantiationUtilsTest.java
index 50c8703..8b55635 100644
--- a/stratosphere-core/src/test/java/eu/stratosphere/util/InstantiationUtilsTest.java
+++ b/stratosphere-core/src/test/java/eu/stratosphere/util/InstantiationUtilsTest.java
@@ -60,7 +60,5 @@ public class InstantiationUtilsTest {
InstantiationUtil.checkForInstantiation(TestClass.class);
}
- private class TestClass {
-
- }
+ private class TestClass {}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-dist/src/main/stratosphere-bin/conf/stratosphere-conf.yaml
----------------------------------------------------------------------
diff --git a/stratosphere-dist/src/main/stratosphere-bin/conf/stratosphere-conf.yaml b/stratosphere-dist/src/main/stratosphere-bin/conf/stratosphere-conf.yaml
index ccf1c21..ccc67c2 100644
--- a/stratosphere-dist/src/main/stratosphere-bin/conf/stratosphere-conf.yaml
+++ b/stratosphere-dist/src/main/stratosphere-bin/conf/stratosphere-conf.yaml
@@ -25,6 +25,8 @@ jobmanager.heap.mb: 256
taskmanager.heap.mb: 512
+taskmanager.numberOfTaskSlots: -1
+
parallelization.degree.default: 1
#==============================================================================
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/CollectionInputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/CollectionInputFormat.java b/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/CollectionInputFormat.java
index fd5ae36..82f2755 100644
--- a/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/CollectionInputFormat.java
+++ b/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/CollectionInputFormat.java
@@ -37,10 +37,10 @@ public class CollectionInputFormat<T> extends GenericInputFormat<T> implements N
private static final long serialVersionUID = 1L;
- private Collection<T> dataSet; // input data as collection
-
private TypeSerializer<T> serializer;
+ private transient Collection<T> dataSet; // input data as collection. transient, because it will be serialized in a custom way
+
private transient Iterator<T> iterator;
@@ -75,7 +75,7 @@ public class CollectionInputFormat<T> extends GenericInputFormat<T> implements N
// --------------------------------------------------------------------------------------------
private void writeObject(ObjectOutputStream out) throws IOException {
- out.writeObject(serializer);
+ out.defaultWriteObject();
out.writeInt(dataSet.size());
OutputViewDataOutputWrapper outWrapper = new OutputViewDataOutputWrapper();
@@ -86,13 +86,8 @@ public class CollectionInputFormat<T> extends GenericInputFormat<T> implements N
}
}
- @SuppressWarnings("unchecked")
- private void readObject(ObjectInputStream in) throws IOException {
- try {
- this.serializer = (TypeSerializer<T>) in.readObject();
- } catch (ClassNotFoundException ex){
- throw new IOException("Could not load the serializer class.", ex);
- }
+ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+ in.defaultReadObject();
int collectionLength = in.readInt();
List<T> list = new ArrayList<T>(collectionLength);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-java/src/test/java/eu/stratosphere/api/java/io/CollectionInputFormatTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-java/src/test/java/eu/stratosphere/api/java/io/CollectionInputFormatTest.java b/stratosphere-java/src/test/java/eu/stratosphere/api/java/io/CollectionInputFormatTest.java
index 4388c9c..f734540 100644
--- a/stratosphere-java/src/test/java/eu/stratosphere/api/java/io/CollectionInputFormatTest.java
+++ b/stratosphere-java/src/test/java/eu/stratosphere/api/java/io/CollectionInputFormatTest.java
@@ -17,6 +17,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import eu.stratosphere.api.java.typeutils.BasicTypeInfo;
import eu.stratosphere.api.java.typeutils.TypeExtractor;
import eu.stratosphere.core.io.GenericInputSplit;
import eu.stratosphere.types.TypeInformation;
@@ -29,7 +30,9 @@ import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
+import java.util.List;
public class CollectionInputFormatTest {
public static class ElementType{
@@ -105,4 +108,80 @@ public class CollectionInputFormatTest {
fail(ex.toString());
}
}
+
+ @Test
+ public void testSerializabilityStrings() {
+
+ final String[] data = new String[] {
+ "To be, or not to be,--that is the question:--",
+ "Whether 'tis nobler in the mind to suffer",
+ "The slings and arrows of outrageous fortune",
+ "Or to take arms against a sea of troubles,",
+ "And by opposing end them?--To die,--to sleep,--",
+ "No more; and by a sleep to say we end",
+ "The heartache, and the thousand natural shocks",
+ "That flesh is heir to,--'tis a consummation",
+ "Devoutly to be wish'd. To die,--to sleep;--",
+ "To sleep! perchance to dream:--ay, there's the rub;",
+ "For in that sleep of death what dreams may come,",
+ "When we have shuffled off this mortal coil,",
+ "Must give us pause: there's the respect",
+ "That makes calamity of so long life;",
+ "For who would bear the whips and scorns of time,",
+ "The oppressor's wrong, the proud man's contumely,",
+ "The pangs of despis'd love, the law's delay,",
+ "The insolence of office, and the spurns",
+ "That patient merit of the unworthy takes,",
+ "When he himself might his quietus make",
+ "With a bare bodkin? who would these fardels bear,",
+ "To grunt and sweat under a weary life,",
+ "But that the dread of something after death,--",
+ "The undiscover'd country, from whose bourn",
+ "No traveller returns,--puzzles the will,",
+ "And makes us rather bear those ills we have",
+ "Than fly to others that we know not of?",
+ "Thus conscience does make cowards of us all;",
+ "And thus the native hue of resolution",
+ "Is sicklied o'er with the pale cast of thought;",
+ "And enterprises of great pith and moment,",
+ "With this regard, their currents turn awry,",
+ "And lose the name of action.--Soft you now!",
+ "The fair Ophelia!--Nymph, in thy orisons",
+ "Be all my sins remember'd."
+ };
+
+ try {
+
+ List<String> inputCollection = Arrays.asList(data);
+ CollectionInputFormat<String> inputFormat = new CollectionInputFormat<String>(inputCollection, BasicTypeInfo.STRING_TYPE_INFO.createSerializer());
+
+ // serialize
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ ObjectOutputStream oos = new ObjectOutputStream(baos);
+ oos.writeObject(inputFormat);
+ oos.close();
+
+ // deserialize
+ ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+ ObjectInputStream ois = new ObjectInputStream(bais);
+ Object result = ois.readObject();
+
+ assertTrue(result instanceof CollectionInputFormat);
+
+ int i = 0;
+ @SuppressWarnings("unchecked")
+ CollectionInputFormat<String> in = (CollectionInputFormat<String>) result;
+ in.open(new GenericInputSplit());
+
+ while (!in.reachedEnd()) {
+ assertEquals(data[i++], in.nextRecord(""));
+ }
+
+ assertEquals(data.length, i);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
index 18395fb..5886650 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
@@ -145,7 +145,6 @@ public class ExecutionGraph implements ExecutionListener {
* the configuration originally attached to the job graph
*/
private ExecutionGraph(final JobID jobID, final String jobName, final Configuration jobConfiguration) {
-
if (jobID == null) {
throw new IllegalArgumentException("Argument jobID must not be null");
}
@@ -165,8 +164,7 @@ public class ExecutionGraph implements ExecutionListener {
* @throws GraphConversionException
* thrown if the job graph is not valid and no execution graph can be constructed from it
*/
- public ExecutionGraph(final JobGraph job, final int defaultParallelism)
- throws GraphConversionException {
+ public ExecutionGraph(JobGraph job, int defaultParallelism) throws GraphConversionException {
this(job.getJobID(), job.getName(), job.getJobConfiguration());
// Start constructing the new execution graph from given job graph
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
index f3cf3a3..40e2a0b 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
@@ -16,14 +16,12 @@ package eu.stratosphere.nephele.jobmanager;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
-import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -468,7 +466,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
ExecutionGraph eg;
try {
- eg = new ExecutionGraph(job, this.getAvailableSlots());
+ eg = new ExecutionGraph(job, 1);
} catch (GraphConversionException e) {
if (e.getCause() == null) {
return new JobSubmissionResult(AbstractJobResult.ReturnCode.ERROR, StringUtils.stringifyException(e));
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
index 575454f..3225ab7 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
@@ -172,8 +172,12 @@ public class TaskManager implements TaskOperationProtocol {
throw new NullPointerException("Execution mode must not be null.");
}
-
-// LOG.info("TaskManager started as user " + UserGroupInformation.getCurrentUser().getShortUserName());
+ try {
+ LOG.info("TaskManager started as user " + UserGroupInformation.getCurrentUser().getShortUserName());
+ } catch (Throwable t) {
+ LOG.error("Cannot determine user group information.", t);
+ }
+
LOG.info("User system property: " + System.getProperty("user.name"));
LOG.info("Execution mode: " + executionMode);
@@ -344,9 +348,14 @@ public class TaskManager implements TaskOperationProtocol {
{
HardwareDescription resources = HardwareDescriptionFactory.extractFromSystem();
- numberOfSlots = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS,
- Hardware.getNumberCPUCores());
-
+ int slots = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, -1);
+ if (slots == -1) {
+ slots = Hardware.getNumberCPUCores();
+ } else if (slots <= 0) {
+ throw new Exception("Illegal value for the number of task slots: " + slots);
+ }
+ this.numberOfSlots = slots;
+
// Check whether the memory size has been explicitly configured. if so that overrides the default mechanism
// of taking as much as is mentioned in the hardware description
long memorySize = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, -1);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
index 2e75305..9d8700d 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
@@ -104,7 +104,7 @@ public class ExecutionGraphTest {
LibraryCacheManager.register(jobID, new String[0]);
- final ExecutionGraph eg = new ExecutionGraph(jg, -1);
+ final ExecutionGraph eg = new ExecutionGraph(jg, 1);
// test all methods of ExecutionGraph
final ExecutionStage executionStage = eg.getCurrentExecutionStage();
@@ -215,7 +215,7 @@ public class ExecutionGraphTest {
assertEquals(1, egv2.getNumberOfBackwardLinks());
assertEquals(1, egv2.getNumberOfForwardLinks());
assertEquals(0, egv2.getStageNumber());
- assertEquals(-1, egv2.getUserDefinedNumberOfMembers());
+ assertEquals(1, egv2.getUserDefinedNumberOfMembers());
assertNull(egv2.getVertexToShareInstancesWith());
// test all methods of ExecutionVertex
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/b4b633ea/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
index 2549d4f..ffb958a 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
@@ -66,7 +66,7 @@ public class JobManagerITCase {
static {
// no logging, because the tests create expected exception
- LogUtils.initializeDefaultConsoleLogger(Level.INFO);
+ LogUtils.initializeDefaultConsoleLogger(Level.WARN);
}
/**
[10/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/WorksetIterationNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/WorksetIterationNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/WorksetIterationNode.java
index 2c70794..f425695 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/WorksetIterationNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/WorksetIterationNode.java
@@ -155,8 +155,7 @@ public class WorksetIterationNode extends TwoInputNode implements IterationNode
UnaryOperatorNode solutionSetDeltaUpdateAux = new UnaryOperatorNode("Solution-Set Delta", getSolutionSetKeyFields(),
new SolutionSetDeltaOperator(getSolutionSetKeyFields()));
solutionSetDeltaUpdateAux.setDegreeOfParallelism(getDegreeOfParallelism());
- solutionSetDeltaUpdateAux.setSubtasksPerInstance(getSubtasksPerInstance());
-
+
PactConnection conn = new PactConnection(solutionSetDelta, solutionSetDeltaUpdateAux);
solutionSetDeltaUpdateAux.setIncomingConnection(conn);
solutionSetDelta.addOutgoingConnection(conn);
@@ -218,11 +217,6 @@ public class WorksetIterationNode extends TwoInputNode implements IterationNode
// --------------------------------------------------------------------------------------------
@Override
- public boolean isMemoryConsumer() {
- return true;
- }
-
- @Override
protected List<OperatorDescriptorDual> getPossibleProperties() {
return new ArrayList<OperatorDescriptorDual>(1);
}
@@ -331,13 +325,12 @@ public class WorksetIterationNode extends TwoInputNode implements IterationNode
else if (report == FeedbackPropertiesMeetRequirementsReport.NOT_MET) {
// attach a no-op node through which we create the properties of the original input
Channel toNoOp = new Channel(candidate);
- globPropsReqWorkset.parameterizeChannel(toNoOp, false, false);
+ globPropsReqWorkset.parameterizeChannel(toNoOp, false);
locPropsReqWorkset.parameterizeChannel(toNoOp);
UnaryOperatorNode rebuildWorksetPropertiesNode = new UnaryOperatorNode("Rebuild Workset Properties", FieldList.EMPTY_LIST);
rebuildWorksetPropertiesNode.setDegreeOfParallelism(candidate.getDegreeOfParallelism());
- rebuildWorksetPropertiesNode.setSubtasksPerInstance(candidate.getSubtasksPerInstance());
SingleInputPlanNode rebuildWorksetPropertiesPlanNode = new SingleInputPlanNode(rebuildWorksetPropertiesNode, "Rebuild Workset Properties", toNoOp, DriverStrategy.UNARY_NO_OP);
rebuildWorksetPropertiesPlanNode.initProperties(toNoOp.getGlobalProperties(), toNoOp.getLocalProperties());
@@ -518,7 +511,6 @@ public class WorksetIterationNode extends TwoInputNode implements IterationNode
super(new NoOpBinaryUdfOp<Nothing>(new NothingTypeInfo()));
setDegreeOfParallelism(1);
- setSubtasksPerInstance(1);
}
public void setInputs(PactConnection input1, PactConnection input2) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dataproperties/RequestedGlobalProperties.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dataproperties/RequestedGlobalProperties.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dataproperties/RequestedGlobalProperties.java
index 574922a..e769508 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dataproperties/RequestedGlobalProperties.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dataproperties/RequestedGlobalProperties.java
@@ -53,8 +53,7 @@ public final class RequestedGlobalProperties implements Cloneable {
/**
* Sets the partitioning property for the global properties.
*
- * @param partitioning The new partitioning to set.
- * @param partitionedFields
+ * @param partitionedFields The fields on which the data is hash-partitioned.
*/
public void setHashPartitioned(FieldSet partitionedFields) {
if (partitionedFields == null) {
@@ -218,7 +217,7 @@ public final class RequestedGlobalProperties implements Cloneable {
* @param globalDopChange
* @param localDopChange
*/
- public void parameterizeChannel(Channel channel, boolean globalDopChange, boolean localDopChange) {
+ public void parameterizeChannel(Channel channel, boolean globalDopChange) {
// if we request nothing, then we need no special strategy. forward, if the number of instances remains
// the same, randomly repartition otherwise
if (isTrivial()) {
@@ -228,8 +227,7 @@ public final class RequestedGlobalProperties implements Cloneable {
final GlobalProperties inGlobals = channel.getSource().getGlobalProperties();
// if we have no global parallelism change, check if we have already compatible global properties
- if (!globalDopChange && !localDopChange && isMetBy(inGlobals)) {
- // we meet already everything, so go forward
+ if (!globalDopChange && isMetBy(inGlobals)) {
channel.setShipStrategy(ShipStrategyType.FORWARD);
return;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllGroupWithPartialPreGroupProperties.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllGroupWithPartialPreGroupProperties.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllGroupWithPartialPreGroupProperties.java
index b389855..0ef277e 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllGroupWithPartialPreGroupProperties.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllGroupWithPartialPreGroupProperties.java
@@ -48,8 +48,7 @@ public final class AllGroupWithPartialPreGroupProperties extends OperatorDescrip
// create an input node for combine with same DOP as input node
GroupReduceNode combinerNode = ((GroupReduceNode) node).getCombinerUtilityNode();
combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());
- combinerNode.setSubtasksPerInstance(in.getSource().getSubtasksPerInstance());
-
+
SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.ALL_GROUP_COMBINE);
combiner.setCosts(new Costs(0, 0));
combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllReduceProperties.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllReduceProperties.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllReduceProperties.java
index be3ed74..867b9d9 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllReduceProperties.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/AllReduceProperties.java
@@ -48,8 +48,7 @@ public final class AllReduceProperties extends OperatorDescriptorSingle
// create an input node for combine with same DOP as input node
ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());
- combinerNode.setSubtasksPerInstance(in.getSource().getSubtasksPerInstance());
-
+
SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.ALL_REDUCE);
combiner.setCosts(new Costs(0, 0));
combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/GroupReduceWithCombineProperties.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/GroupReduceWithCombineProperties.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/GroupReduceWithCombineProperties.java
index 980cf6d..ec45a53 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/GroupReduceWithCombineProperties.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/GroupReduceWithCombineProperties.java
@@ -85,9 +85,9 @@ public final class GroupReduceWithCombineProperties extends OperatorDescriptorSi
// create an input node for combine with same DOP as input node
GroupReduceNode combinerNode = ((GroupReduceNode) node).getCombinerUtilityNode();
combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());
- combinerNode.setSubtasksPerInstance(in.getSource().getSubtasksPerInstance());
-
- SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.SORTED_GROUP_COMBINE, this.keyList);
+
+ SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine("+node.getPactContract()
+ .getName()+")", toCombiner, DriverStrategy.SORTED_GROUP_COMBINE, this.keyList);
combiner.setCosts(new Costs(0, 0));
combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/PartialGroupProperties.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/PartialGroupProperties.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/PartialGroupProperties.java
index a28feeb..9fb97b5 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/PartialGroupProperties.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/PartialGroupProperties.java
@@ -44,9 +44,9 @@ public final class PartialGroupProperties extends OperatorDescriptorSingle {
// create in input node for combine with same DOP as input node
GroupReduceNode combinerNode = new GroupReduceNode((GroupReduceOperatorBase<?, ?, ?>) node.getPactContract());
combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());
- combinerNode.setSubtasksPerInstance(in.getSource().getSubtasksPerInstance());
-
- return new SingleInputPlanNode(combinerNode, "Combine("+node.getPactContract().getName()+")", in, DriverStrategy.SORTED_GROUP_COMBINE, this.keyList);
+
+ return new SingleInputPlanNode(combinerNode, "Combine("+node.getPactContract().getName()+")", in,
+ DriverStrategy.SORTED_GROUP_COMBINE, this.keyList);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/ReduceProperties.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/ReduceProperties.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/ReduceProperties.java
index 4539da5..0db3fa5 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/ReduceProperties.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/operators/ReduceProperties.java
@@ -56,8 +56,7 @@ public final class ReduceProperties extends OperatorDescriptorSingle {
// create an input node for combine with same DOP as input node
ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());
- combinerNode.setSubtasksPerInstance(in.getSource().getSubtasksPerInstance());
-
+
SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.SORTED_PARTIAL_REDUCE, this.keyList);
combiner.setCosts(new Costs(0, 0));
combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/Channel.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/Channel.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/Channel.java
index 8fe95c9..6f9418f 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/Channel.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/Channel.java
@@ -68,11 +68,11 @@ public class Channel implements EstimateProvider, Cloneable, DumpableConnection<
private TempMode tempMode;
- private long tempMemory;
+ private double relativeTempMemory;
- private long memoryGlobalStrategy;
+ private double relativeMemoryGlobalStrategy;
- private long memoryLocalStrategy;
+ private double relativeMemoryLocalStrategy;
private int replicationFactor = 1;
@@ -200,17 +200,17 @@ public class Channel implements EstimateProvider, Cloneable, DumpableConnection<
*
* @return The temp memory.
*/
- public long getTempMemory() {
- return this.tempMemory;
+ public double getRelativeTempMemory() {
+ return this.relativeTempMemory;
}
/**
* Sets the memory for materializing the channel's result from this Channel.
*
- * @param tempMemory The memory for materialization.
+ * @param relativeTempMemory The relative amount of memory for materialization.
*/
- public void setTempMemory(long tempMemory) {
- this.tempMemory = tempMemory;
+ public void setRelativeTempMemory(double relativeTempMemory) {
+ this.relativeTempMemory = relativeTempMemory;
}
/**
@@ -286,20 +286,20 @@ public class Channel implements EstimateProvider, Cloneable, DumpableConnection<
this.localStrategyComparator = localStrategyComparator;
}
- public long getMemoryGlobalStrategy() {
- return memoryGlobalStrategy;
+ public double getRelativeMemoryGlobalStrategy() {
+ return relativeMemoryGlobalStrategy;
}
- public void setMemoryGlobalStrategy(long memoryGlobalStrategy) {
- this.memoryGlobalStrategy = memoryGlobalStrategy;
+ public void setRelativeMemoryGlobalStrategy(double relativeMemoryGlobalStrategy) {
+ this.relativeMemoryGlobalStrategy = relativeMemoryGlobalStrategy;
}
- public long getMemoryLocalStrategy() {
- return memoryLocalStrategy;
+ public double getRelativeMemoryLocalStrategy() {
+ return relativeMemoryLocalStrategy;
}
- public void setMemoryLocalStrategy(long memoryLocalStrategy) {
- this.memoryLocalStrategy = memoryLocalStrategy;
+ public void setRelativeMemoryLocalStrategy(double relativeMemoryLocalStrategy) {
+ this.relativeMemoryLocalStrategy = relativeMemoryLocalStrategy;
}
public boolean isOnDynamicPath() {
@@ -437,33 +437,6 @@ public class Channel implements EstimateProvider, Cloneable, DumpableConnection<
}
throw new CompilerException("Unrecognized Ship Strategy Type: " + this.shipStrategy);
}
-
- public void adjustGlobalPropertiesForLocalParallelismChange() {
- if (this.shipStrategy == null || this.shipStrategy == ShipStrategyType.NONE) {
- throw new IllegalStateException("Cannot adjust channel for degree of parallelism " +
- "change before the ship strategy is set.");
- }
-
- // make sure the properties are acquired
- if (this.globalProps == null) {
- getGlobalProperties();
- }
-
- // some strategies globally reestablish properties
- switch (this.shipStrategy) {
- case FORWARD:
- this.globalProps.reset();
- return;
- case NONE: // excluded by sanity check. just here to silence compiler warnings check completion
- case BROADCAST:
- case PARTITION_HASH:
- case PARTITION_RANGE:
- case PARTITION_RANDOM:
- return;
- }
-
- throw new CompilerException("Unrecognized Ship Strategy Type: " + this.shipStrategy);
- }
// --------------------------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/PlanNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/PlanNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/PlanNode.java
index 69263bc..539006c 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/PlanNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plan/PlanNode.java
@@ -65,12 +65,10 @@ public abstract class PlanNode implements Visitable<PlanNode>, DumpableNode<Plan
protected Costs cumulativeCosts; // the cumulative costs of all operators in the sub-tree
- private long memoryPerSubTask; // the amount of memory dedicated to each task, in bytes
+ private double relativeMemoryPerSubTask; // the relative share of memory dedicated to each task
private int degreeOfParallelism;
- private int subtasksPerInstance;
-
private boolean pFlag; // flag for the internal pruning algorithm
// --------------------------------------------------------------------------------------------
@@ -83,8 +81,7 @@ public abstract class PlanNode implements Visitable<PlanNode>, DumpableNode<Plan
this.driverStrategy = strategy;
this.degreeOfParallelism = template.getDegreeOfParallelism();
- this.subtasksPerInstance = template.getSubtasksPerInstance();
-
+
// check, if there is branch at this node. if yes, this candidate must be associated with
// the branching template node.
if (template.isBranching()) {
@@ -166,17 +163,17 @@ public abstract class PlanNode implements Visitable<PlanNode>, DumpableNode<Plan
*
* @return The memory per task, in bytes.
*/
- public long getMemoryPerSubTask() {
- return this.memoryPerSubTask;
+ public double getRelativeMemoryPerSubTask() {
+ return this.relativeMemoryPerSubTask;
}
/**
* Sets the memory dedicated to each task for this node.
*
- * @param memoryPerTask The memory per sub-task, in bytes.
+ * @param relativeMemoryPerSubtask The relative memory per sub-task
*/
- public void setMemoryPerSubTask(long memoryPerTask) {
- this.memoryPerSubTask = memoryPerTask;
+ public void setRelativeMemoryPerSubtask(double relativeMemoryPerSubtask) {
+ this.relativeMemoryPerSubTask = relativeMemoryPerSubtask;
}
/**
@@ -303,18 +300,10 @@ public abstract class PlanNode implements Visitable<PlanNode>, DumpableNode<Plan
this.degreeOfParallelism = parallelism;
}
- public void setSubtasksPerInstance(int subTasksPerInstance) {
- this.subtasksPerInstance = subTasksPerInstance;
- }
-
public int getDegreeOfParallelism() {
return this.degreeOfParallelism;
}
- public int getSubtasksPerInstance() {
- return this.subtasksPerInstance;
- }
-
public long getGuaranteedAvailableMemory() {
return this.template.getMinimalMemoryAcrossAllSubTasks();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plandump/PlanJSONDumpGenerator.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plandump/PlanJSONDumpGenerator.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plandump/PlanJSONDumpGenerator.java
index 82d757c..a1baff1 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plandump/PlanJSONDumpGenerator.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plandump/PlanJSONDumpGenerator.java
@@ -252,9 +252,6 @@ public class PlanJSONDumpGenerator {
writer.print(",\n\t\t\"parallelism\": \""
+ (n.getDegreeOfParallelism() >= 1 ? n.getDegreeOfParallelism() : "default") + "\"");
- writer.print(",\n\t\t\"subtasks_per_instance\": \""
- + (n.getSubtasksPerInstance() >= 1 ? n.getSubtasksPerInstance() : "default") + "\"");
-
// output node predecessors
Iterator<? extends DumpableConnection<?>> inConns = node.getDumpableInputs().iterator();
String child1name = "", child2name = "";
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
index 5a30fb6..b4c7560 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
@@ -22,9 +22,6 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import eu.stratosphere.api.common.aggregators.AggregatorRegistry;
import eu.stratosphere.api.common.aggregators.AggregatorWithName;
import eu.stratosphere.api.common.aggregators.ConvergenceCriterion;
@@ -101,7 +98,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
private static final boolean mergeIterationAuxTasks = GlobalConfiguration.getBoolean(MERGE_ITERATION_AUX_TASKS_KEY, true);
- private static final Log LOG = LogFactory.getLog(NepheleJobGraphGenerator.class);
+// private static final Log LOG = LogFactory.getLog(NepheleJobGraphGenerator.class);
private static final TaskInChain ALREADY_VISITED_PLACEHOLDER = new TaskInChain(null, null, null);
@@ -186,13 +183,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
t.addChainedTask(tic.getChainedTask(), tic.getTaskConfig(), tic.getTaskName());
}
- // now that all have been created, make sure that all share their instances with the one
- // with the highest degree of parallelism
- if (program.getInstanceTypeName() != null) {
- this.maxDegreeVertex.setInstanceType(program.getInstanceTypeName());
- } else {
- LOG.warn("No instance type assigned to JobVertex.");
- }
for (AbstractJobVertex vertex : this.vertices.values()) {
if (vertex != this.maxDegreeVertex) {
vertex.setVertexToShareInstancesWith(this.maxDegreeVertex);
@@ -231,7 +221,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
* @param node
* The node that is currently processed.
* @return True, if the visitor should descend to the node's children, false if not.
- * @see eu.stratosphere.util.Visitor#preVisit(eu.stratosphere.pact.common.plan.Visitable)
+ * @see eu.stratosphere.util.Visitor#preVisit(eu.stratosphere.util.Visitable)
*/
@Override
public boolean preVisit(PlanNode node) {
@@ -260,8 +250,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
// operator with the tail, if they have the same DOP. not merging is currently not
// implemented
PlanNode root = iterationNode.getRootOfStepFunction();
- if (root.getDegreeOfParallelism() != node.getDegreeOfParallelism() ||
- root.getSubtasksPerInstance() != node.getSubtasksPerInstance())
+ if (root.getDegreeOfParallelism() != node.getDegreeOfParallelism())
{
throw new CompilerException("Error: The final operator of the step " +
"function has a different degree of parallelism than the iteration operator itself.");
@@ -278,14 +267,12 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
PlanNode nextWorkSet = iterationNode.getNextWorkSetPlanNode();
PlanNode solutionSetDelta = iterationNode.getSolutionSetDeltaPlanNode();
- if (nextWorkSet.getDegreeOfParallelism() != node.getDegreeOfParallelism() ||
- nextWorkSet.getSubtasksPerInstance() != node.getSubtasksPerInstance())
+ if (nextWorkSet.getDegreeOfParallelism() != node.getDegreeOfParallelism())
{
throw new CompilerException("It is currently not supported that the final operator of the step " +
"function has a different degree of parallelism than the iteration operator itself.");
}
- if (solutionSetDelta.getDegreeOfParallelism() != node.getDegreeOfParallelism() ||
- solutionSetDelta.getSubtasksPerInstance() != node.getSubtasksPerInstance())
+ if (solutionSetDelta.getDegreeOfParallelism() != node.getDegreeOfParallelism())
{
throw new CompilerException("It is currently not supported that the final operator of the step " +
"function has a different degree of parallelism than the iteration operator itself.");
@@ -364,11 +351,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
if (this.maxDegreeVertex == null || this.maxDegreeVertex.getNumberOfSubtasks() < pd) {
this.maxDegreeVertex = vertex;
}
-
- // set the number of tasks per instance
- if (node.getSubtasksPerInstance() >= 1) {
- vertex.setNumberOfSubtasksPerInstance(node.getSubtasksPerInstance());
- }
// check whether this vertex is part of an iteration step function
if (this.currentIteration != null) {
@@ -377,10 +359,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
if (iterationNode.getDegreeOfParallelism() < pd) {
throw new CompilerException("Error: All functions that are part of an iteration must have the same, or a lower, degree-of-parallelism than the iteration operator.");
}
- if (iterationNode.getSubtasksPerInstance() < node.getSubtasksPerInstance()) {
- throw new CompilerException("Error: All functions that are part of an iteration must have the same, or a lower, number of subtasks-per-node than the iteration operator.");
- }
-
+
// store the id of the iterations the step functions participate in
IterationDescriptor descr = this.iterations.get(this.currentIteration);
new TaskConfig(vertex.getConfiguration()).setIterationId(descr.getId());
@@ -401,7 +380,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
*
* @param node
* The node currently processed during the post-visit.
- * @see eu.stratosphere.util.Visitor#postVisit(eu.stratosphere.pact.common.plan.Visitable)
+ * @see eu.stratosphere.util.Visitor#postVisit(eu.stratosphere.util.Visitable)
*/
@Override
public void postVisit(PlanNode node) {
@@ -739,7 +718,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
inConn.getLocalStrategy() == LocalStrategy.NONE &&
pred.getOutgoingChannels().size() == 1 &&
node.getDegreeOfParallelism() == pred.getDegreeOfParallelism() &&
- node.getSubtasksPerInstance() == pred.getSubtasksPerInstance() &&
node.getBroadcastInputs().isEmpty();
// cannot chain the nodes that produce the next workset or the next solution set, if they are not the
@@ -879,7 +857,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
c.getLocalStrategy() == LocalStrategy.NONE &&
c.getTempMode() == TempMode.NONE &&
successor.getDegreeOfParallelism() == pspn.getDegreeOfParallelism() &&
- successor.getSubtasksPerInstance() == pspn.getSubtasksPerInstance() &&
!(successor instanceof NAryUnionPlanNode) &&
successor != iteration.getRootOfStepFunction() &&
iteration.getInput().getLocalStrategy() == LocalStrategy.NONE;
@@ -948,7 +925,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
c.getLocalStrategy() == LocalStrategy.NONE &&
c.getTempMode() == TempMode.NONE &&
successor.getDegreeOfParallelism() == wspn.getDegreeOfParallelism() &&
- successor.getSubtasksPerInstance() == wspn.getSubtasksPerInstance() &&
!(successor instanceof NAryUnionPlanNode) &&
successor != iteration.getNextWorkSetPlanNode() &&
iteration.getInitialWorksetInput().getLocalStrategy() == LocalStrategy.NONE;
@@ -995,17 +971,17 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
}
private void assignDriverResources(PlanNode node, TaskConfig config) {
- final long mem = node.getMemoryPerSubTask();
- if (mem > 0) {
- config.setMemoryDriver(mem);
+ final double relativeMem = node.getRelativeMemoryPerSubTask();
+ if (relativeMem > 0) {
+ config.setRelativeMemoryDriver(relativeMem);
config.setFilehandlesDriver(this.defaultMaxFan);
config.setSpillingThresholdDriver(this.defaultSortSpillingThreshold);
}
}
private void assignLocalStrategyResources(Channel c, TaskConfig config, int inputNum) {
- if (c.getMemoryLocalStrategy() > 0) {
- config.setMemoryInput(inputNum, c.getMemoryLocalStrategy());
+ if (c.getRelativeMemoryLocalStrategy() > 0) {
+ config.setRelativeMemoryInput(inputNum, c.getRelativeMemoryLocalStrategy());
config.setFilehandlesInput(inputNum, this.defaultMaxFan);
config.setSpillingThresholdInput(inputNum, this.defaultSortSpillingThreshold);
}
@@ -1020,13 +996,13 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
* channel is then the channel into the union node, the local strategy channel the one from the union to the
* actual target operator.
*
- * @param channelForGlobalStrategy
- * @param channelForLocalStrategy
+ * @param channel
* @param inputNumber
* @param sourceVertex
* @param sourceConfig
* @param targetVertex
* @param targetConfig
+ * @param isBroadcast
* @throws JobGraphDefinitionException
* @throws CompilerException
*/
@@ -1133,10 +1109,10 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
if (needsMemory) {
// sanity check
- if (tm == null || tm == TempMode.NONE || channel.getTempMemory() < 1) {
+ if (tm == null || tm == TempMode.NONE || channel.getRelativeTempMemory() <= 0) {
throw new CompilerException("Bug in compiler: Inconsistent description of input materialization.");
}
- config.setInputMaterializationMemory(inputNum, channel.getTempMemory());
+ config.setRelativeInputMaterializationMemory(inputNum, channel.getRelativeTempMemory());
}
}
}
@@ -1153,11 +1129,11 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
final int numFinalOuts = headFinalOutputConfig.getNumOutputs();
headConfig.setIterationHeadFinalOutputConfig(headFinalOutputConfig);
headConfig.setIterationHeadIndexOfSyncOutput(numStepFunctionOuts + numFinalOuts);
- final long memForBackChannel = bulkNode.getMemoryPerSubTask();
- if (memForBackChannel <= 0) {
+ final double relativeMemForBackChannel = bulkNode.getRelativeMemoryPerSubTask();
+ if (relativeMemForBackChannel <= 0) {
throw new CompilerException("Bug: No memory has been assigned to the iteration back channel.");
}
- headConfig.setBackChannelMemory(memForBackChannel);
+ headConfig.setRelativeBackChannelMemory(relativeMemForBackChannel);
// --------------------------- create the sync task ---------------------------
final JobOutputVertex sync = new JobOutputVertex("Sync(" +
@@ -1219,7 +1195,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
JobOutputVertex fakeTail = new JobOutputVertex("Fake Tail", this.jobGraph);
fakeTail.setOutputClass(FakeOutputTask.class);
fakeTail.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
- fakeTail.setNumberOfSubtasksPerInstance(headVertex.getNumberOfSubtasksPerInstance());
this.auxVertices.add(fakeTail);
// connect the fake tail
@@ -1262,7 +1237,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
JobOutputVertex fakeTailTerminationCriterion = new JobOutputVertex("Fake Tail for Termination Criterion", this.jobGraph);
fakeTailTerminationCriterion.setOutputClass(FakeOutputTask.class);
fakeTailTerminationCriterion.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
- fakeTailTerminationCriterion.setNumberOfSubtasksPerInstance(headVertex.getNumberOfSubtasksPerInstance());
this.auxVertices.add(fakeTailTerminationCriterion);
// connect the fake tail
@@ -1310,14 +1284,14 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
final int numFinalOuts = headFinalOutputConfig.getNumOutputs();
headConfig.setIterationHeadFinalOutputConfig(headFinalOutputConfig);
headConfig.setIterationHeadIndexOfSyncOutput(numStepFunctionOuts + numFinalOuts);
- final long mem = iterNode.getMemoryPerSubTask();
- if (mem <= 0) {
+ final double relativeMemory = iterNode.getRelativeMemoryPerSubTask();
+ if (relativeMemory <= 0) {
throw new CompilerException("Bug: No memory has been assigned to the workset iteration.");
}
headConfig.setIsWorksetIteration();
- headConfig.setBackChannelMemory(mem / 2);
- headConfig.setSolutionSetMemory(mem / 2);
+ headConfig.setRelativeBackChannelMemory(relativeMemory / 2);
+ headConfig.setRelativeSolutionSetMemory(relativeMemory / 2);
// set the solution set serializer and comparator
headConfig.setSolutionSetSerializer(iterNode.getSolutionSetSerializer());
@@ -1396,7 +1370,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
JobOutputVertex fakeTail = new JobOutputVertex("Fake Tail", this.jobGraph);
fakeTail.setOutputClass(FakeOutputTask.class);
fakeTail.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
- fakeTail.setNumberOfSubtasksPerInstance(headVertex.getNumberOfSubtasksPerInstance());
this.auxVertices.add(fakeTail);
// connect the fake tail
@@ -1435,7 +1408,6 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
JobOutputVertex fakeTail = new JobOutputVertex("Fake Tail", this.jobGraph);
fakeTail.setOutputClass(FakeOutputTask.class);
fakeTail.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
- fakeTail.setNumberOfSubtasksPerInstance(headVertex.getNumberOfSubtasksPerInstance());
this.auxVertices.add(fakeTail);
// connect the fake tail
@@ -1502,9 +1474,9 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
private AbstractJobVertex containingVertex;
- @SuppressWarnings("unchecked")
- TaskInChain(@SuppressWarnings("rawtypes") Class<? extends ChainedDriver> chainedTask, TaskConfig taskConfig, String taskName) {
- this.chainedTask = (Class<? extends ChainedDriver<?, ?>>) chainedTask;
+ TaskInChain(Class<? extends ChainedDriver<?, ?>> chainedTask, TaskConfig taskConfig,
+ String taskName) {
+ this.chainedTask = chainedTask;
this.taskConfig = taskConfig;
this.taskName = taskName;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/CompilerTestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/CompilerTestBase.java b/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/CompilerTestBase.java
index e0da85d..f534ad9 100644
--- a/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/CompilerTestBase.java
+++ b/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/CompilerTestBase.java
@@ -12,7 +12,6 @@
**********************************************************************************************************************/
package eu.stratosphere.pact.compiler;
-import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
@@ -37,12 +36,6 @@ import eu.stratosphere.compiler.costs.DefaultCostEstimator;
import eu.stratosphere.compiler.plan.OptimizedPlan;
import eu.stratosphere.compiler.plan.PlanNode;
import eu.stratosphere.compiler.plan.SingleInputPlanNode;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.HardwareDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.instance.InstanceTypeDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
import eu.stratosphere.util.LogUtils;
import eu.stratosphere.util.OperatingSystem;
import eu.stratosphere.util.Visitor;
@@ -72,8 +65,6 @@ public abstract class CompilerTestBase implements java.io.Serializable {
protected transient PactCompiler noStatsCompiler;
- protected transient InstanceTypeDescription instanceType;
-
private transient int statCounter;
// ------------------------------------------------------------------------
@@ -85,29 +76,22 @@ public abstract class CompilerTestBase implements java.io.Serializable {
@Before
public void setup() {
- InetSocketAddress dummyAddr = new InetSocketAddress("localhost", 12345);
-
this.dataStats = new DataStatistics();
- this.withStatsCompiler = new PactCompiler(this.dataStats, new DefaultCostEstimator(), dummyAddr);
+ this.withStatsCompiler = new PactCompiler(this.dataStats, new DefaultCostEstimator());
this.withStatsCompiler.setDefaultDegreeOfParallelism(DEFAULT_PARALLELISM);
- this.noStatsCompiler = new PactCompiler(null, new DefaultCostEstimator(), dummyAddr);
+ this.noStatsCompiler = new PactCompiler(null, new DefaultCostEstimator());
this.noStatsCompiler.setDefaultDegreeOfParallelism(DEFAULT_PARALLELISM);
-
- // create the instance type description
- InstanceType iType = InstanceTypeFactory.construct("standard", 6, 2, 4096, 100, 0);
- HardwareDescription hDesc = HardwareDescriptionFactory.construct(2, 4096 * 1024 * 1024, 2000 * 1024 * 1024);
- this.instanceType = InstanceTypeDescriptionFactory.construct(iType, hDesc, DEFAULT_PARALLELISM * 2);
}
// ------------------------------------------------------------------------
public OptimizedPlan compileWithStats(Plan p) {
- return this.withStatsCompiler.compile(p, this.instanceType);
+ return this.withStatsCompiler.compile(p);
}
public OptimizedPlan compileNoStats(Plan p) {
- return this.noStatsCompiler.compile(p, this.instanceType);
+ return this.noStatsCompiler.compile(p);
}
public void setSourceStatistics(GenericDataSourceBase<?, ?> source, long size, float recordWidth) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java b/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
index 0e4177e..eff48cc 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/configuration/ConfigConstants.java
@@ -99,6 +99,11 @@ public final class ConfigConstants {
public static final String TASK_MANAGER_NETWORK_BUFFER_SIZE_KEY = "taskmanager.network.bufferSizeInBytes";
/**
+ * The config parameter defining the number of task slots of a task manager.
+ */
+ public static final String TASK_MANAGER_NUM_TASK_SLOTS = "taskmanager.numberOfTaskSlots";
+
+ /**
* The number of incoming network IO threads (e.g. incoming connection threads used in NettyConnectionManager
* for the ServerBootstrap.)
*/
@@ -290,12 +295,7 @@ public final class ConfigConstants {
/**
* The default degree of parallelism for operations.
*/
- public static final int DEFAULT_PARALLELIZATION_DEGREE = -1;
-
- /**
- * The default intra-node parallelism.
- */
- public static final int DEFAULT_MAX_INTRA_NODE_PARALLELIZATION_DEGREE = -1;
+ public static final int DEFAULT_PARALLELIZATION_DEGREE = 1;
// ------------------------------ Runtime ---------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-core/src/main/java/eu/stratosphere/util/ClassUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/util/ClassUtils.java b/stratosphere-core/src/main/java/eu/stratosphere/util/ClassUtils.java
index 96be666..f79dd2a 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/util/ClassUtils.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/util/ClassUtils.java
@@ -40,6 +40,7 @@ public final class ClassUtils {
throws ClassNotFoundException {
if (!className.contains("Protocol")) {
+ System.out.println(className);
throw new ClassNotFoundException("Only use this method for protocols!");
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/event/job/VertexAssignmentEvent.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/event/job/VertexAssignmentEvent.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/event/job/VertexAssignmentEvent.java
index f01e62d..c86c12b 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/event/job/VertexAssignmentEvent.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/event/job/VertexAssignmentEvent.java
@@ -38,11 +38,6 @@ public final class VertexAssignmentEvent extends AbstractEvent implements Manage
private String instanceName;
/**
- * The type of the instance the vertex is now assigned to.
- */
- private String instanceType;
-
- /**
* Constructs a new event.
*
* @param timestamp
@@ -51,16 +46,13 @@ public final class VertexAssignmentEvent extends AbstractEvent implements Manage
* identifies the vertex this event refers to
* @param instanceName
* the name of the instance the vertex is now assigned to
- * @param instanceType
- * the type of the instance the vertex is now assigned to
*/
public VertexAssignmentEvent(final long timestamp, final ManagementVertexID managementVertexID,
- final String instanceName, final String instanceType) {
+ final String instanceName) {
super(timestamp);
this.managementVertexID = managementVertexID;
this.instanceName = instanceName;
- this.instanceType = instanceType;
}
/**
@@ -90,16 +82,6 @@ public final class VertexAssignmentEvent extends AbstractEvent implements Manage
return this.instanceName;
}
- /**
- * Returns the type of the instance the vertex is now assigned to.
- *
- * @return the type of the instance the vertex is now assigned to
- */
- public String getInstanceType() {
- return this.instanceType;
- }
-
-
@Override
public void read(final DataInput in) throws IOException {
@@ -107,7 +89,6 @@ public final class VertexAssignmentEvent extends AbstractEvent implements Manage
this.managementVertexID.read(in);
this.instanceName = StringRecord.readString(in);
- this.instanceType = StringRecord.readString(in);
}
@@ -118,7 +99,6 @@ public final class VertexAssignmentEvent extends AbstractEvent implements Manage
this.managementVertexID.write(out);
StringRecord.writeString(out, this.instanceName);
- StringRecord.writeString(out, this.instanceType);
}
@@ -149,16 +129,6 @@ public final class VertexAssignmentEvent extends AbstractEvent implements Manage
}
}
- if (this.instanceType == null) {
- if (vae.getInstanceType() != null) {
- return false;
- }
- } else {
- if (!this.instanceType.equals(vae.getInstanceType())) {
- return false;
- }
- }
-
return true;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionEdge.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionEdge.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionEdge.java
index 0106361..920c47e 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionEdge.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionEdge.java
@@ -19,7 +19,6 @@ import eu.stratosphere.runtime.io.channels.ChannelType;
/**
* Objects of this class represent a pair of {@link eu.stratosphere.runtime.io.serialization.io.channels.InputChannel} and {@link AbstractOutputChannel} objects
* within an {@link ExecutionGraph}, Nephele's internal scheduling representation for jobs.
- *
*/
public final class ExecutionEdge {
@@ -51,42 +50,34 @@ public final class ExecutionEdge {
}
public ExecutionGate getInputGate() {
-
return this.inputGate;
}
public ExecutionGate getOutputGate() {
-
return this.outputGate;
}
public ChannelID getOutputChannelID() {
-
return this.outputChannelID;
}
public ChannelID getInputChannelID() {
-
return this.inputChannelID;
}
public int getOutputGateIndex() {
-
return this.outputGateIndex;
}
public int getInputGateIndex() {
-
return this.inputGateIndex;
}
public ChannelType getChannelType() {
-
return this.groupEdge.getChannelType();
}
public int getConnectionID() {
-
return this.groupEdge.getConnectionID();
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
index ca7eddb..c5059f9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
@@ -38,8 +38,6 @@ import eu.stratosphere.nephele.execution.ExecutionListener;
import eu.stratosphere.nephele.execution.ExecutionState;
import eu.stratosphere.nephele.instance.AllocatedResource;
import eu.stratosphere.nephele.instance.DummyInstance;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceType;
import eu.stratosphere.nephele.jobgraph.DistributionPattern;
import eu.stratosphere.runtime.io.gates.GateID;
import eu.stratosphere.runtime.io.channels.ChannelID;
@@ -160,18 +158,18 @@ public class ExecutionGraph implements ExecutionListener {
*
* @param job
* the user's job graph
- * @param instanceManager
- * the instance manager
+ * @param defaultParallelism
+ * the default degree of parallelism applied to vertices that have no explicit parallelism set
* @throws GraphConversionException
* thrown if the job graph is not valid and no execution graph can be constructed from it
*/
- public ExecutionGraph(final JobGraph job, final InstanceManager instanceManager)
+ public ExecutionGraph(final JobGraph job, final int defaultParallelism)
throws GraphConversionException {
this(job.getJobID(), job.getName(), job.getJobConfiguration());
// Start constructing the new execution graph from given job graph
try {
- constructExecutionGraph(job, instanceManager);
+ constructExecutionGraph(job, defaultParallelism);
} catch (GraphConversionException e) {
throw e; // forward graph conversion exceptions
} catch (Exception e) {
@@ -217,7 +215,6 @@ public class ExecutionGraph implements ExecutionListener {
final ExecutionGroupVertex groupVertex = it2.next();
if (groupVertex.isNumberOfMembersUserDefined()) {
groupVertex.createInitialExecutionVertices(groupVertex.getUserDefinedNumberOfMembers());
- groupVertex.repairSubtasksPerInstance();
}
}
@@ -253,12 +250,12 @@ public class ExecutionGraph implements ExecutionListener {
*
* @param jobGraph
* the job graph to create the execution graph from
- * @param instanceManager
- * the instance manager
+ * @param defaultParallelism
+ * the default degree of parallelism applied to vertices that have no explicit parallelism set
* @throws GraphConversionException
* thrown if the job graph is not valid and no execution graph can be constructed from it
*/
- private void constructExecutionGraph(final JobGraph jobGraph, final InstanceManager instanceManager)
+ private void constructExecutionGraph(final JobGraph jobGraph, final int defaultParallelism)
throws GraphConversionException {
// Clean up temporary data structures
@@ -272,8 +269,11 @@ public class ExecutionGraph implements ExecutionListener {
// Convert job vertices to execution vertices and initialize them
final AbstractJobVertex[] all = jobGraph.getAllJobVertices();
for (int i = 0; i < all.length; i++) {
- final ExecutionVertex createdVertex = createVertex(all[i], instanceManager, initialExecutionStage,
- jobGraph.getJobConfiguration());
+ if(all[i].getNumberOfSubtasks() == -1){
+ all[i].setNumberOfSubtasks(defaultParallelism);
+ }
+
+ final ExecutionVertex createdVertex = createVertex(all[i], initialExecutionStage);
temporaryVertexMap.put(all[i], createdVertex);
temporaryGroupVertexMap.put(all[i], createdVertex.getGroupVertex());
}
@@ -444,37 +444,15 @@ public class ExecutionGraph implements ExecutionListener {
*
* @param jobVertex
* the job vertex to create the execution vertex from
- * @param instanceManager
- * the instanceManager
* @param initialExecutionStage
* the initial execution stage all group vertices are added to
- * @param jobConfiguration
- * the configuration object originally attached to the {@link JobGraph}
* @return the new execution vertex
* @throws GraphConversionException
* thrown if the job vertex is of an unknown subclass
*/
- private ExecutionVertex createVertex(final AbstractJobVertex jobVertex, final InstanceManager instanceManager,
- final ExecutionStage initialExecutionStage, final Configuration jobConfiguration)
+ private ExecutionVertex createVertex(final AbstractJobVertex jobVertex, final ExecutionStage initialExecutionStage)
throws GraphConversionException {
- // If the user has requested instance type, check if the type is known by the current instance manager
- InstanceType instanceType = null;
- boolean userDefinedInstanceType = false;
- if (jobVertex.getInstanceType() != null) {
-
- userDefinedInstanceType = true;
- instanceType = instanceManager.getInstanceTypeByName(jobVertex.getInstanceType());
- if (instanceType == null) {
- throw new GraphConversionException("Requested instance type " + jobVertex.getInstanceType()
- + " is not known to the instance manager");
- }
- }
-
- if (instanceType == null) {
- instanceType = instanceManager.getDefaultInstanceType();
- }
-
// Create an initial execution vertex for the job vertex
final Class<? extends AbstractInvokable> invokableClass = jobVertex.getInvokableClass();
if (invokableClass == null) {
@@ -491,8 +469,7 @@ public class ExecutionGraph implements ExecutionListener {
ExecutionGroupVertex groupVertex = null;
try {
groupVertex = new ExecutionGroupVertex(jobVertex.getName(), jobVertex.getID(), this,
- jobVertex.getNumberOfSubtasks(), instanceType, userDefinedInstanceType,
- jobVertex.getNumberOfSubtasksPerInstance(), jobVertex.getVertexToShareInstancesWith() != null ? true
+ jobVertex.getNumberOfSubtasks(), jobVertex.getVertexToShareInstancesWith() != null ? true
: false, jobVertex.getNumberOfExecutionRetries(), jobVertex.getConfiguration(), signature,
invokableClass);
} catch (Throwable t) {
@@ -506,39 +483,6 @@ public class ExecutionGraph implements ExecutionListener {
throw new GraphConversionException(StringUtils.stringifyException(e));
}
- // Check if the user's specifications for the number of subtasks are valid
- final int minimumNumberOfSubtasks = jobVertex.getMinimumNumberOfSubtasks(groupVertex.getEnvironment()
- .getInvokable());
- final int maximumNumberOfSubtasks = jobVertex.getMaximumNumberOfSubtasks(groupVertex.getEnvironment()
- .getInvokable());
- if (jobVertex.getNumberOfSubtasks() != -1) {
- if (jobVertex.getNumberOfSubtasks() < 1) {
- throw new GraphConversionException("Cannot split task " + jobVertex.getName() + " into "
- + jobVertex.getNumberOfSubtasks() + " subtasks");
- }
-
- if (jobVertex.getNumberOfSubtasks() < minimumNumberOfSubtasks) {
- throw new GraphConversionException("Number of subtasks must be at least " + minimumNumberOfSubtasks);
- }
-
- if (maximumNumberOfSubtasks != -1) {
- if (jobVertex.getNumberOfSubtasks() > maximumNumberOfSubtasks) {
- throw new GraphConversionException("Number of subtasks for vertex " + jobVertex.getName()
- + " can be at most " + maximumNumberOfSubtasks);
- }
- }
- }
-
- // Check number of subtasks per instance
- if (jobVertex.getNumberOfSubtasksPerInstance() != -1 && jobVertex.getNumberOfSubtasksPerInstance() < 1) {
- throw new GraphConversionException("Cannot set number of subtasks per instance to "
- + jobVertex.getNumberOfSubtasksPerInstance() + " for vertex " + jobVertex.getName());
- }
-
- // Assign min/max to the group vertex (settings are actually applied in applyUserDefinedSettings)
- groupVertex.setMinMemberSize(minimumNumberOfSubtasks);
- groupVertex.setMaxMemberSize(maximumNumberOfSubtasks);
-
// Register input and output vertices separately
if (jobVertex instanceof AbstractJobInputVertex) {
@@ -579,8 +523,7 @@ public class ExecutionGraph implements ExecutionListener {
jobVertex.getNumberOfBackwardConnections());
// Assign initial instance to vertex (may be overwritten later on when user settings are applied)
- ev.setAllocatedResource(new AllocatedResource(DummyInstance.createDummyInstance(instanceType), instanceType,
- null));
+ ev.setAllocatedResource(new AllocatedResource(DummyInstance.createDummyInstance(), null));
return ev;
}
@@ -853,6 +796,48 @@ public class ExecutionGraph implements ExecutionListener {
}
/**
+ * Retrieves the maximum parallel degree of the job represented by this execution graph
+ */
+ public int getMaxNumberSubtasks() {
+ int maxDegree = 0;
+ final Iterator<ExecutionStage> stageIterator = this.stages.iterator();
+
+ while(stageIterator.hasNext()){
+ final ExecutionStage stage = stageIterator.next();
+
+ int maxPerStageDegree = stage.getMaxNumberSubtasks();
+
+ if(maxPerStageDegree > maxDegree){
+ maxDegree = maxPerStageDegree;
+ }
+ }
+
+ return maxDegree;
+ }
+
+ /**
+ * Retrieves the number of slots required to run this execution graph
+ * @return the maximum number of slots required by any single execution stage of this graph
+ */
+ public int getRequiredSlots(){
+ int maxRequiredSlots = 0;
+
+ final Iterator<ExecutionStage> stageIterator = this.stages.iterator();
+
+ while(stageIterator.hasNext()){
+ final ExecutionStage stage = stageIterator.next();
+
+ int requiredSlots = stage.getRequiredSlots();
+
+ if(requiredSlots > maxRequiredSlots){
+ maxRequiredSlots = requiredSlots;
+ }
+ }
+
+ return maxRequiredSlots;
+ }
+
+ /**
* Returns the stage which is currently executed.
*
* @return the currently executed stage or <code>null</code> if the job execution is already completed
@@ -1318,25 +1303,16 @@ public class ExecutionGraph implements ExecutionListener {
return this.jobName;
}
-
@Override
- public void userThreadStarted(final JobID jobID, final ExecutionVertexID vertexID, final Thread userThread) {
- // TODO Auto-generated method stub
-
- }
-
+ public void userThreadStarted(JobID jobID, ExecutionVertexID vertexID, Thread userThread) {}
@Override
- public void userThreadFinished(final JobID jobID, final ExecutionVertexID vertexID, final Thread userThread) {
- // TODO Auto-generated method stub
-
- }
+ public void userThreadFinished(JobID jobID, ExecutionVertexID vertexID, Thread userThread) {}
/**
* Reconstructs the execution pipelines for the entire execution graph.
*/
private void reconstructExecutionPipelines() {
-
final Iterator<ExecutionStage> it = this.stages.iterator();
while (it.hasNext()) {
@@ -1345,39 +1321,17 @@ public class ExecutionGraph implements ExecutionListener {
}
/**
- * Calculates the connection IDs of the graph to avoid deadlocks in the data flow at runtime.
- */
- private void calculateConnectionIDs() {
-
- final Set<ExecutionGroupVertex> alreadyVisited = new HashSet<ExecutionGroupVertex>();
- final ExecutionStage lastStage = getStage(getNumberOfStages() - 1);
-
- for (int i = 0; i < lastStage.getNumberOfStageMembers(); ++i) {
-
- final ExecutionGroupVertex groupVertex = lastStage.getStageMember(i);
-
- int currentConnectionID = 0;
-
- if (groupVertex.isOutputVertex()) {
- currentConnectionID = groupVertex.calculateConnectionID(currentConnectionID, alreadyVisited);
- }
- }
- }
-
- /**
* Returns an iterator over all execution stages contained in this graph.
*
* @return an iterator over all execution stages contained in this graph
*/
public Iterator<ExecutionStage> iterator() {
-
return this.stages.iterator();
}
@Override
public int getPriority() {
-
return 1;
}
@@ -1388,7 +1342,22 @@ public class ExecutionGraph implements ExecutionListener {
* the update command to be asynchronously executed on this graph
*/
public void executeCommand(final Runnable command) {
-
this.executorService.execute(command);
}
+
+ private void calculateConnectionIDs() {
+ final Set<ExecutionGroupVertex> alreadyVisited = new HashSet<ExecutionGroupVertex>();
+ final ExecutionStage lastStage = getStage(getNumberOfStages() - 1);
+
+ for (int i = 0; i < lastStage.getNumberOfStageMembers(); ++i) {
+
+ final ExecutionGroupVertex groupVertex = lastStage.getStageMember(i);
+
+ int currentConnectionID = 0;
+
+ if (groupVertex.isOutputVertex()) {
+ currentConnectionID = groupVertex.calculateConnectionID(currentConnectionID, alreadyVisited);
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
index 89b4b6d..c865609 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
@@ -18,7 +18,6 @@ import eu.stratosphere.core.io.InputSplit;
import eu.stratosphere.nephele.execution.RuntimeEnvironment;
import eu.stratosphere.nephele.instance.AllocatedResource;
import eu.stratosphere.nephele.instance.DummyInstance;
-import eu.stratosphere.nephele.instance.InstanceType;
import eu.stratosphere.nephele.jobgraph.JobVertexID;
import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.channels.ChannelType;
@@ -69,41 +68,11 @@ public final class ExecutionGroupVertex {
private final CopyOnWriteArrayList<ExecutionVertex> groupMembers = new CopyOnWriteArrayList<ExecutionVertex>();
/**
- * Maximum number of execution vertices this group vertex can manage.
- */
- private volatile int maxMemberSize = 1;
-
- /**
- * Minimum number of execution vertices this group vertex can manage.
- */
- private volatile int minMemberSize = 1;
-
- /**
* The user defined number of execution vertices, -1 if the user has not specified it.
*/
private final int userDefinedNumberOfMembers;
/**
- * The instance type to be used for execution vertices this group vertex manages.
- */
- private volatile InstanceType instanceType = null;
-
- /**
- * Stores whether the instance type is user defined.
- */
- private final boolean userDefinedInstanceType;
-
- /**
- * Stores the number of subtasks per instance.
- */
- private volatile int numberOfSubtasksPerInstance = -1;
-
- /**
- * Stores whether the number of subtasks per instance is user defined.
- */
- private final boolean userDefinedNumberOfSubtasksPerInstance;
-
- /**
* Number of retries in case of an error before the task represented by this vertex is considered as failed.
*/
private final int numberOfExecutionRetries;
@@ -175,12 +144,6 @@ public final class ExecutionGroupVertex {
* the execution graph is group vertex belongs to
* @param userDefinedNumberOfMembers
* the user defined number of subtasks, -1 if the user did not specify the number
- * @param instanceType
- * the instance type to be used for execution vertices this group vertex manages.
- * @param userDefinedInstanceType
- * <code>true</code> if the instance type is user defined, <code>false</code> otherwise
- * @param numberOfSubtasksPerInstance
- * the user defined number of subtasks per instance, -1 if the user did not specify the number
* @param userDefinedVertexToShareInstanceWith
* <code>true</code> if the user specified another vertex to share instances with, <code>false</code>
* otherwise
@@ -197,24 +160,13 @@ public final class ExecutionGroupVertex {
* throws if an error occurs while instantiating the {@link AbstractInvokable}
*/
public ExecutionGroupVertex(final String name, final JobVertexID jobVertexID, final ExecutionGraph executionGraph,
- final int userDefinedNumberOfMembers, final InstanceType instanceType,
- final boolean userDefinedInstanceType, final int numberOfSubtasksPerInstance,
- final boolean userDefinedVertexToShareInstanceWith, final int numberOfExecutionRetries,
- final Configuration configuration, final ExecutionSignature signature,
+ final int userDefinedNumberOfMembers, final boolean userDefinedVertexToShareInstanceWith,
+ final int numberOfExecutionRetries, final Configuration configuration, final ExecutionSignature signature,
final Class<? extends AbstractInvokable> invokableClass) throws Exception {
this.name = (name != null) ? name : "";
this.jobVertexID = jobVertexID;
this.userDefinedNumberOfMembers = userDefinedNumberOfMembers;
- this.instanceType = instanceType;
- this.userDefinedInstanceType = userDefinedInstanceType;
- if (numberOfSubtasksPerInstance != -1) {
- this.numberOfSubtasksPerInstance = numberOfSubtasksPerInstance;
- this.userDefinedNumberOfSubtasksPerInstance = true;
- } else {
- this.numberOfSubtasksPerInstance = 1;
- this.userDefinedNumberOfSubtasksPerInstance = false;
- }
if (numberOfExecutionRetries >= 0) {
this.numberOfExecutionRetries = numberOfExecutionRetries;
} else {
@@ -309,32 +261,6 @@ public final class ExecutionGroupVertex {
}
/**
- * Sets the maximum number of members this group vertex can have.
- *
- * @param maxSize
- * the maximum number of members this group vertex can have
- */
- void setMaxMemberSize(final int maxSize) {
-
- // TODO: Add checks here
-
- this.maxMemberSize = maxSize;
- }
-
- /**
- * Sets the minimum number of members this group vertex must have.
- *
- * @param minSize
- * the minimum number of members this group vertex must have
- */
- void setMinMemberSize(final int minSize) {
-
- // TODO: Add checks here
-
- this.minMemberSize = minSize;
- }
-
- /**
* Returns the current number of members this group vertex has.
*
* @return the current number of members this group vertex has
@@ -345,24 +271,6 @@ public final class ExecutionGroupVertex {
}
/**
- * Returns the maximum number of members this group vertex can have.
- *
- * @return the maximum number of members this group vertex can have
- */
- public int getMaximumNumberOfGroupMembers() {
- return this.maxMemberSize;
- }
-
- /**
- * Returns the minimum number of members this group vertex must have.
- *
- * @return the minimum number of members this group vertex must have
- */
- public int getMinimumNumberOfGroupMember() {
- return this.minMemberSize;
- }
-
- /**
* Wires this group vertex to the specified group vertex and creates
* a back link.
*
@@ -376,10 +284,6 @@ public final class ExecutionGroupVertex {
* the channel type to be used for this edge
* @param userDefinedChannelType
* <code>true</code> if the channel type is user defined, <code>false</code> otherwise
- * @param compressionLevel
- * the compression level to be used for this edge
- * @param userDefinedCompressionLevel
- * <code>true</code> if the compression level is user defined, <code>false</code> otherwise
* @param distributionPattern
* the distribution pattern to create the wiring between the group members
* @param isBroadcast
@@ -480,10 +384,10 @@ public final class ExecutionGroupVertex {
* @throws GraphConversionException
* thrown if the number of execution vertices for this group vertex cannot be set to the desired value
*/
- void createInitialExecutionVertices(final int initalNumberOfVertices) throws GraphConversionException {
+ void createInitialExecutionVertices(final int initialNumberOfVertices) throws GraphConversionException {
// If the requested number of group vertices does not change, do nothing
- if (initalNumberOfVertices == this.getCurrentNumberOfGroupMembers()) {
+ if (initialNumberOfVertices == this.getCurrentNumberOfGroupMembers()) {
return;
}
@@ -517,25 +421,14 @@ public final class ExecutionGroupVertex {
* }
*/
- if (initalNumberOfVertices < this.getMinimumNumberOfGroupMember()) {
- throw new GraphConversionException("Number of members must be at least "
- + this.getMinimumNumberOfGroupMember());
- }
-
- if ((this.getMaximumNumberOfGroupMembers() != -1)
- && (initalNumberOfVertices > this.getMaximumNumberOfGroupMembers())) {
- throw new GraphConversionException("Number of members cannot exceed "
- + this.getMaximumNumberOfGroupMembers());
- }
-
final ExecutionVertex originalVertex = this.getGroupMember(0);
int currentNumberOfExecutionVertices = this.getCurrentNumberOfGroupMembers();
- while (currentNumberOfExecutionVertices++ < initalNumberOfVertices) {
+ while (currentNumberOfExecutionVertices++ < initialNumberOfVertices) {
final ExecutionVertex vertex = originalVertex.splitVertex();
vertex.setAllocatedResource(new AllocatedResource(DummyInstance
- .createDummyInstance(this.instanceType), this.instanceType, null));
+ .createDummyInstance(), null));
this.groupMembers.add(vertex);
}
@@ -645,53 +538,6 @@ public final class ExecutionGroupVertex {
return this.userDefinedNumberOfMembers;
}
- boolean isInstanceTypeUserDefined() {
-
- return this.userDefinedInstanceType;
- }
-
- void setInstanceType(final InstanceType instanceType) throws GraphConversionException {
-
- if (instanceType == null) {
- throw new IllegalArgumentException("Argument instanceType must not be null");
- }
-
- if (this.userDefinedInstanceType) {
- throw new GraphConversionException("Cannot overwrite user defined instance type "
- + instanceType.getIdentifier());
- }
-
- this.instanceType = instanceType;
-
- // Reset instance allocation of all members and let reassignInstances do the work
- for (int i = 0; i < this.groupMembers.size(); i++) {
- final ExecutionVertex vertex = this.groupMembers.get(i);
- vertex.setAllocatedResource(null);
- }
- }
-
- InstanceType getInstanceType() {
- return this.instanceType;
- }
-
- boolean isNumberOfSubtasksPerInstanceUserDefined() {
-
- return this.userDefinedNumberOfSubtasksPerInstance;
- }
-
- void setNumberOfSubtasksPerInstance(final int numberOfSubtasksPerInstance) throws GraphConversionException {
-
- if (this.userDefinedNumberOfSubtasksPerInstance
- && (numberOfSubtasksPerInstance != this.numberOfSubtasksPerInstance)) {
- throw new GraphConversionException("Cannot overwrite user defined number of subtasks per instance");
- }
-
- this.numberOfSubtasksPerInstance = numberOfSubtasksPerInstance;
- }
-
- int getNumberOfSubtasksPerInstance() {
- return this.numberOfSubtasksPerInstance;
- }
/**
* Returns the number of retries in case of an error before the task represented by this vertex is considered as
@@ -766,27 +612,13 @@ public final class ExecutionGroupVertex {
}
- void repairSubtasksPerInstance() {
-
- final Iterator<ExecutionVertex> it = this.groupMembers.iterator();
- int count = 0;
- while (it.hasNext()) {
-
- final ExecutionVertex v = it.next();
- v.setAllocatedResource(this.groupMembers.get(
- (count++ / this.numberOfSubtasksPerInstance) * this.numberOfSubtasksPerInstance)
- .getAllocatedResource());
- }
- }
-
void repairInstanceSharing(final Set<AllocatedResource> availableResources) {
// Number of required resources by this group vertex
- final int numberOfRequiredInstances = (this.groupMembers.size() / this.numberOfSubtasksPerInstance)
- + (((this.groupMembers.size() % this.numberOfSubtasksPerInstance) != 0) ? 1 : 0);
+ final int numberOfRequiredSlots = this.groupMembers.size();
// Number of resources to be replaced
- final int resourcesToBeReplaced = Math.min(availableResources.size(), numberOfRequiredInstances);
+ final int resourcesToBeReplaced = Math.min(availableResources.size(), numberOfRequiredSlots);
// Build the replacement map if necessary
final Map<AllocatedResource, AllocatedResource> replacementMap = new HashMap<AllocatedResource, AllocatedResource>();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionStage.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionStage.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionStage.java
index eab2375..df29aef 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionStage.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionStage.java
@@ -15,18 +15,10 @@ package eu.stratosphere.nephele.executiongraph;
import java.util.HashSet;
import java.util.Iterator;
-import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import eu.stratosphere.nephele.execution.ExecutionState;
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.DummyInstance;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.runtime.io.channels.ChannelType;
/**
@@ -35,16 +27,10 @@ import eu.stratosphere.runtime.io.channels.ChannelType;
* job can only start to execute if the execution of its preceding stage is complete.
* <p>
* This class is thread-safe.
- *
*/
public final class ExecutionStage {
/**
- * The log object used for debugging.
- */
- private static final Log LOG = LogFactory.getLog(ExecutionStage.class);
-
- /**
* The execution graph that this stage belongs to.
*/
private final ExecutionGraph executionGraph;
@@ -242,69 +228,6 @@ public final class ExecutionStage {
}
/**
- * Checks which instance types and how many instances of these types are required to execute this stage
- * of the job graph. The required instance types and the number of instances are collected in the given map. Note
- * that this method does not clear the map before collecting the instances.
- *
- * @param instanceRequestMap
- * the map containing the instances types and the required number of instances of the respective type
- * @param executionState
- * the execution state the considered vertices must be in
- */
- public void collectRequiredInstanceTypes(final InstanceRequestMap instanceRequestMap,
- final ExecutionState executionState) {
-
- final Set<AbstractInstance> collectedInstances = new HashSet<AbstractInstance>();
- final ExecutionGroupVertexIterator groupIt = new ExecutionGroupVertexIterator(this.getExecutionGraph(), true,
- this.stageNum);
-
- while (groupIt.hasNext()) {
-
- final ExecutionGroupVertex groupVertex = groupIt.next();
- final Iterator<ExecutionVertex> vertexIt = groupVertex.iterator();
- while (vertexIt.hasNext()) {
-
- // Get the instance type from the execution vertex if it
- final ExecutionVertex vertex = vertexIt.next();
- if (vertex.getExecutionState() == executionState) {
- final AbstractInstance instance = vertex.getAllocatedResource().getInstance();
-
- if (collectedInstances.contains(instance)) {
- continue;
- } else {
- collectedInstances.add(instance);
- }
-
- if (instance instanceof DummyInstance) {
-
- final InstanceType instanceType = instance.getType();
- int num = instanceRequestMap.getMaximumNumberOfInstances(instanceType);
- ++num;
- instanceRequestMap.setMaximumNumberOfInstances(instanceType, num);
- if (groupVertex.isInputVertex()) {
- num = instanceRequestMap.getMinimumNumberOfInstances(instanceType);
- ++num;
- instanceRequestMap.setMinimumNumberOfInstances(instanceType, num);
- }
- } else {
- LOG.debug("Execution Vertex " + vertex.getName() + " (" + vertex.getID()
- + ") is already assigned to non-dummy instance, skipping...");
- }
- }
- }
- }
-
- final Iterator<Map.Entry<InstanceType, Integer>> it = instanceRequestMap.getMaximumIterator();
- while (it.hasNext()) {
-
- final Map.Entry<InstanceType, Integer> entry = it.next();
- if (instanceRequestMap.getMinimumNumberOfInstances(entry.getKey()) == 0) {
- instanceRequestMap.setMinimumNumberOfInstances(entry.getKey(), entry.getValue());
- }
- }
- }
-
- /**
* Returns the execution graph that this stage belongs to.
*
* @return the execution graph that this stage belongs to
@@ -446,4 +369,37 @@ public final class ExecutionStage {
}
}
}
+
+ public int getMaxNumberSubtasks(){
+ int maxDegree = 0;
+
+ for(int i =0; i < this.getNumberOfStageMembers(); i++){
+ final ExecutionGroupVertex groupVertex = this.getStageMember(i);
+
+ if(groupVertex.getCurrentNumberOfGroupMembers() > maxDegree){
+ maxDegree = groupVertex.getCurrentNumberOfGroupMembers();
+ }
+ }
+
+ return maxDegree;
+ }
+
+ public int getRequiredSlots(){
+ Set<Instance> instanceSet = new HashSet<Instance>();
+
+ for(int i=0; i< this.getNumberOfStageMembers(); i++){
+ final ExecutionGroupVertex groupVertex = this.getStageMember(i);
+
+ final Iterator<ExecutionVertex> vertexIterator = groupVertex.iterator();
+
+ while(vertexIterator.hasNext()){
+ final ExecutionVertex vertex = vertexIterator.next();
+
+ instanceSet.add(vertex.getAllocatedResource().getInstance());
+ }
+
+ }
+
+ return instanceSet.size();
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionVertex.java
index 8e9395a..1e8d538 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionVertex.java
@@ -855,7 +855,6 @@ public final class ExecutionVertex {
* <code>false/<code> otherwise
*/
public boolean decrementRetriesLeftAndCheck() {
-
return (this.retriesLeft.decrementAndGet() > 0);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/InternalJobStatus.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/InternalJobStatus.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/InternalJobStatus.java
index 3a41aa2..8565495 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/InternalJobStatus.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/InternalJobStatus.java
@@ -74,6 +74,7 @@ public enum InternalJobStatus {
* the internal job status to converted.
* @return the corresponding job status or <code>null</code> if no corresponding job status exists
*/
+ @SuppressWarnings("incomplete-switch")
public static JobStatus toJobStatus(InternalJobStatus status) {
switch (status) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ManagementGraphFactory.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ManagementGraphFactory.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ManagementGraphFactory.java
index 72e3651..04c68b1 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ManagementGraphFactory.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ManagementGraphFactory.java
@@ -17,8 +17,8 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
-import eu.stratosphere.nephele.instance.AbstractInstance;
import eu.stratosphere.runtime.io.channels.ChannelType;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.nephele.managementgraph.ManagementEdge;
import eu.stratosphere.nephele.managementgraph.ManagementEdgeID;
import eu.stratosphere.nephele.managementgraph.ManagementGate;
@@ -120,12 +120,11 @@ public class ManagementGraphFactory {
final ExecutionVertex ev = iterator.next();
final ManagementGroupVertex parent = groupMap.get(ev.getGroupVertex());
- final AbstractInstance instance = ev.getAllocatedResource().getInstance();
+ final Instance instance = ev.getAllocatedResource().getInstance();
final ManagementVertex managementVertex = new ManagementVertex(
parent,
ev.getID().toManagementVertexID(),
- (instance.getInstanceConnectionInfo() != null) ? instance.getInstanceConnectionInfo().toString() : instance.toString(),
- instance.getType().toString(),
+ (instance.getInstanceConnectionInfo() != null) ? instance.getInstanceConnectionInfo().toString() : instance.toString(),
ev.getIndexInVertexGroup()
);
managementVertex.setExecutionState(ev.getExecutionState());
[21/22] git commit: Merge fix to omit input/output registering on
JobManager Rework Invokable Task Hierarchy
Posted by se...@apache.org.
Merge fix to omit input/output registering on JobManager
Rework Invokable Task Hierarchy
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/8c1d82a8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/8c1d82a8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/8c1d82a8
Branch: refs/heads/master
Commit: 8c1d82a8ec674de6525319501c6be2674e3143f1
Parents: 2692643
Author: Stephan Ewen <se...@apache.org>
Authored: Fri Jun 20 21:13:23 2014 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:21 2014 +0200
----------------------------------------------------------------------
.../stratosphere/client/program/ClientTest.java | 29 +-
.../plantranslate/NepheleJobGraphGenerator.java | 66 ++---
.../stratosphere/api/common/PlanExecutor.java | 2 -
.../api/common/io/FileOutputFormat.java | 291 +++++--------------
.../stratosphere/api/common/io/FormatUtil.java | 1 -
.../api/common/io/InitializeOnMaster.java | 35 +++
.../api/common/io/OutputFormat.java | 15 +-
.../configuration/Configuration.java | 16 +-
.../eu/stratosphere/core/fs/FileSystem.java | 3 +-
.../eu/stratosphere/core/io/StringRecord.java | 6 +-
.../eu/stratosphere/util/IterableIterator.java | 4 +-
.../api/java/io/PrintingOutputFormat.java | 3 -
.../nephele/execution/RuntimeEnvironment.java | 1 -
.../nephele/executiongraph/ExecutionGraph.java | 104 ++++---
.../executiongraph/ExecutionGroupVertex.java | 1 -
.../jobgraph/AbstractJobInputVertex.java | 19 +-
.../jobgraph/AbstractJobOutputVertex.java | 9 +-
.../nephele/jobgraph/AbstractJobVertex.java | 31 +-
.../stratosphere/nephele/jobgraph/JobGraph.java | 31 +-
.../nephele/jobgraph/JobInputVertex.java | 155 ++--------
.../nephele/jobgraph/JobOutputVertex.java | 132 ++-------
.../nephele/jobgraph/JobTaskVertex.java | 51 +---
.../nephele/jobmanager/JobManager.java | 4 +-
.../splitassigner/InputSplitManager.java | 2 -
.../LocatableInputSplitAssigner.java | 2 -
.../file/FileInputSplitAssigner.java | 5 -
.../nephele/taskmanager/TaskManager.java | 2 +-
.../nephele/template/AbstractInputTask.java | 79 -----
.../nephele/template/AbstractInvokable.java | 1 -
.../nephele/template/AbstractOutputTask.java | 22 --
.../nephele/template/AbstractTask.java | 21 --
.../runtime/iterative/io/FakeOutputTask.java | 4 +-
.../task/IterationSynchronizationSinkTask.java | 4 +-
.../iterative/task/IterationTailPactTask.java | 8 +-
.../pact/runtime/task/DataSinkTask.java | 10 +-
.../pact/runtime/task/DataSourceTask.java | 109 +++----
.../pact/runtime/task/RegularPactTask.java | 16 +-
.../pact/runtime/task/util/TaskConfig.java | 6 +-
.../runtime/io/api/MutableRecordReader.java | 38 +--
.../runtime/io/api/RecordReader.java | 18 +-
.../runtime/io/api/RecordWriter.java | 22 +-
.../executiongraph/ExecutionGraphTest.java | 163 ++++++-----
.../ForwardTask1Input1Output.java | 4 +-
.../ForwardTask1Input2Outputs.java | 4 +-
.../ForwardTask2Inputs1Output.java | 4 +-
.../executiongraph/SelfCrossForwardTask.java | 13 +-
.../nephele/jobmanager/DoubleSourceTask.java | 132 +++++++++
.../nephele/jobmanager/DoubleTargetTask.java | 24 +-
.../jobmanager/ExceptionOutputFormat.java | 26 +-
.../nephele/jobmanager/ExceptionTask.java | 11 +-
.../nephele/jobmanager/ForwardTask.java | 16 +-
.../nephele/jobmanager/JobManagerITCase.java | 158 +++++-----
.../jobmanager/RuntimeExceptionTask.java | 13 +-
.../nephele/jobmanager/UnionTask.java | 22 +-
.../scheduler/queue/DefaultSchedulerTest.java | 68 ++---
.../nephele/util/tasks/DoubleSourceTask.java | 134 +++++++++
.../nephele/util/tasks/FileLineReader.java | 133 +++++++++
.../nephele/util/tasks/FileLineWriter.java | 72 +++++
.../nephele/util/tasks/JobFileInputVertex.java | 255 ++++++++++++++++
.../nephele/util/tasks/JobFileOutputVertex.java | 109 +++++++
.../runtime/hash/HashMatchIteratorITCase.java | 4 +-
.../runtime/hash/ReOpenableHashTableITCase.java | 3 +-
.../pact/runtime/io/ChannelViewsTest.java | 4 +-
.../pact/runtime/io/SpillingBufferTest.java | 4 +-
.../sort/AsynchonousPartialSorterITCase.java | 10 +-
.../CombiningUnilateralSortMergerITCase.java | 4 +-
.../pact/runtime/sort/ExternalSortITCase.java | 8 +-
.../sort/MassiveStringSortingITCase.java | 8 +-
.../sort/SortMergeMatchIteratorITCase.java | 11 +-
.../task/util/HashVsSortMiniBenchmark.java | 4 +-
.../pact/runtime/test/util/DummyInvokable.java | 6 +-
.../pact/runtime/test/util/TaskTestBase.java | 15 +-
.../bufferprovider/LocalBufferPoolTest.java | 6 +
.../TransitiveClosureITCase.java | 2 +-
.../test/iterative/nephele/JobGraphUtils.java | 13 +-
.../recordJobs/util/DiscardingOutputFormat.java | 20 +-
.../test/runtime/NetworkStackThroughput.java | 47 ++-
77 files changed, 1567 insertions(+), 1341 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java b/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
index b3f8159..a948706 100644
--- a/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
+++ b/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
@@ -28,10 +28,7 @@ import org.mockito.Mock;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
-import eu.stratosphere.api.common.InvalidProgramException;
import eu.stratosphere.api.common.Plan;
-import eu.stratosphere.api.java.LocalEnvironment;
-import eu.stratosphere.client.LocalExecutor;
import eu.stratosphere.compiler.DataStatistics;
import eu.stratosphere.compiler.PactCompiler;
import eu.stratosphere.compiler.costs.CostEstimator;
@@ -134,16 +131,16 @@ public class ClientTest {
verify(this.jobClientMock).submitJob();
}
-
- @Test(expected=InvalidProgramException.class)
- public void tryLocalExecution() throws Exception {
- new Client(configMock);
- LocalExecutor.execute(planMock);
- }
-
- @Test(expected=InvalidProgramException.class)
- public void tryLocalEnvironmentExecution() throws Exception {
- new Client(configMock);
- new LocalEnvironment();
- }
-}
+//
+// @Test(expected=InvalidProgramException.class)
+// public void tryLocalExecution() throws Exception {
+// new Client(configMock);
+// LocalExecutor.execute(planMock);
+// }
+//
+// @Test(expected=InvalidProgramException.class)
+// public void tryLocalEnvironmentExecution() throws Exception {
+// new Client(configMock);
+// new LocalEnvironment();
+// }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
index 3089cdb..3c1e9e3 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
@@ -20,14 +20,7 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-
-import eu.stratosphere.api.common.io.InputFormat;
-import eu.stratosphere.api.common.io.OutputFormat;
-import eu.stratosphere.api.common.operators.util.UserCodeWrapper;
-import eu.stratosphere.core.io.InputSplit;
-import eu.stratosphere.nephele.template.AbstractInputTask;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import java.util.Map.Entry;
import eu.stratosphere.api.common.aggregators.AggregatorRegistry;
import eu.stratosphere.api.common.aggregators.AggregatorWithName;
@@ -66,7 +59,6 @@ import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
import eu.stratosphere.nephele.jobgraph.JobInputVertex;
import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobTaskVertex;
-import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.pact.runtime.iterative.convergence.WorksetEmptyConvergenceCriterion;
import eu.stratosphere.pact.runtime.iterative.io.FakeOutputTask;
import eu.stratosphere.pact.runtime.iterative.task.IterationHeadPactTask;
@@ -760,7 +752,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
} else {
// create task vertex
vertex = new JobTaskVertex(taskName, this.jobGraph);
- vertex.setTaskClass( (this.currentIteration != null && node.isOnDynamicPath()) ? IterationIntermediatePactTask.class : RegularPactTask.class);
+ vertex.setInvokableClass((this.currentIteration != null && node.isOnDynamicPath()) ? IterationIntermediatePactTask.class : RegularPactTask.class);
config = new TaskConfig(vertex.getConfiguration());
config.setDriver(ds.getDriverClass());
@@ -786,7 +778,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
final DriverStrategy ds = node.getDriverStrategy();
final JobTaskVertex vertex = new JobTaskVertex(taskName, this.jobGraph);
final TaskConfig config = new TaskConfig(vertex.getConfiguration());
- vertex.setTaskClass( (this.currentIteration != null && node.isOnDynamicPath()) ? IterationIntermediatePactTask.class : RegularPactTask.class);
+ vertex.setInvokableClass( (this.currentIteration != null && node.isOnDynamicPath()) ? IterationIntermediatePactTask.class : RegularPactTask.class);
// set user code
config.setStubWrapper(node.getPactContract().getUserCodeWrapper());
@@ -812,31 +804,29 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
private JobInputVertex createDataSourceVertex(SourcePlanNode node) throws CompilerException {
final JobInputVertex vertex = new JobInputVertex(node.getNodeName(), this.jobGraph);
+ final TaskConfig config = new TaskConfig(vertex.getConfiguration());
- // set task class
- @SuppressWarnings("unchecked")
- final Class<AbstractInputTask<?>> clazz = (Class<AbstractInputTask<?>>) (Class<?>) DataSourceTask
- .class;
- vertex.setInputClass(clazz);
+ vertex.setInvokableClass(DataSourceTask.class);
// set user code
- vertex.setInputFormat((UserCodeWrapper<? extends InputFormat<?, InputSplit>>)node.getPactContract()
- .getUserCodeWrapper());
- vertex.setInputFormatParameters(node.getPactContract().getParameters());
- vertex.setOutputSerializer(node.getSerializer());
+ config.setStubWrapper(node.getPactContract().getUserCodeWrapper());
+ config.setStubParameters(node.getPactContract().getParameters());
+
+ config.setOutputSerializer(node.getSerializer());
return vertex;
}
private AbstractJobOutputVertex createDataSinkVertex(SinkPlanNode node) throws CompilerException {
final JobOutputVertex vertex = new JobOutputVertex(node.getNodeName(), this.jobGraph);
+ final TaskConfig config = new TaskConfig(vertex.getConfiguration());
- vertex.setOutputClass(DataSinkTask.class);
+ vertex.setInvokableClass(DataSinkTask.class);
vertex.getConfiguration().setInteger(DataSinkTask.DEGREE_OF_PARALLELISM_KEY, node.getDegreeOfParallelism());
// set user code
- vertex.setOutputFormat((UserCodeWrapper<? extends OutputFormat<?>>)node.getPactContract().getUserCodeWrapper());
- vertex.setOutputFormatParameters(node.getPactContract().getParameters());
-
+ config.setStubWrapper(node.getPactContract().getUserCodeWrapper());
+ config.setStubParameters(node.getPactContract().getParameters());
+
return vertex;
}
@@ -884,7 +874,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
}
// reset the vertex type to iteration head
- headVertex.setTaskClass(IterationHeadPactTask.class);
+ headVertex.setInvokableClass(IterationHeadPactTask.class);
headConfig = new TaskConfig(headVertex.getConfiguration());
toReturn = null;
} else {
@@ -892,7 +882,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
// everything else happens in the post visit, after the input (the initial partial solution)
// is connected.
headVertex = new JobTaskVertex("PartialSolution ("+iteration.getNodeName()+")", this.jobGraph);
- headVertex.setTaskClass(IterationHeadPactTask.class);
+ headVertex.setInvokableClass(IterationHeadPactTask.class);
headConfig = new TaskConfig(headVertex.getConfiguration());
headConfig.setDriver(NoOpDriver.class);
toReturn = headVertex;
@@ -952,7 +942,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
}
// reset the vertex type to iteration head
- headVertex.setTaskClass(IterationHeadPactTask.class);
+ headVertex.setInvokableClass(IterationHeadPactTask.class);
headConfig = new TaskConfig(headVertex.getConfiguration());
toReturn = null;
} else {
@@ -960,7 +950,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
// everything else happens in the post visit, after the input (the initial partial solution)
// is connected.
headVertex = new JobTaskVertex("IterationHead("+iteration.getNodeName()+")", this.jobGraph);
- headVertex.setTaskClass(IterationHeadPactTask.class);
+ headVertex.setInvokableClass(IterationHeadPactTask.class);
headConfig = new TaskConfig(headVertex.getConfiguration());
headConfig.setDriver(NoOpDriver.class);
toReturn = headVertex;
@@ -1144,7 +1134,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
// --------------------------- create the sync task ---------------------------
final JobOutputVertex sync = new JobOutputVertex("Sync(" +
bulkNode.getNodeName() + ")", this.jobGraph);
- sync.setOutputClass(IterationSynchronizationSinkTask.class);
+ sync.setInvokableClass(IterationSynchronizationSinkTask.class);
sync.setNumberOfSubtasks(1);
this.auxVertices.add(sync);
@@ -1192,14 +1182,14 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
// No following termination criterion
if(rootOfStepFunction.getOutgoingChannels().isEmpty()) {
- rootOfStepFunctionVertex.setTaskClass(IterationTailPactTask.class);
+ rootOfStepFunctionVertex.setInvokableClass(IterationTailPactTask.class);
tailConfig.setOutputSerializer(bulkNode.getSerializerForIterationChannel());
tailConfig.addOutputShipStrategy(ShipStrategyType.FORWARD);
// create the fake output task
JobOutputVertex fakeTail = new JobOutputVertex("Fake Tail", this.jobGraph);
- fakeTail.setOutputClass(FakeOutputTask.class);
+ fakeTail.setInvokableClass(FakeOutputTask.class);
fakeTail.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
this.auxVertices.add(fakeTail);
@@ -1234,14 +1224,14 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
tailConfigOfTerminationCriterion = new TaskConfig(rootOfTerminationCriterionVertex.getConfiguration());
}
- rootOfTerminationCriterionVertex.setTaskClass(IterationTailPactTask.class);
+ rootOfTerminationCriterionVertex.setInvokableClass(IterationTailPactTask.class);
// Hack
tailConfigOfTerminationCriterion.setIsSolutionSetUpdate();
tailConfigOfTerminationCriterion.setOutputSerializer(bulkNode.getSerializerForIterationChannel());
tailConfigOfTerminationCriterion.addOutputShipStrategy(ShipStrategyType.FORWARD);
JobOutputVertex fakeTailTerminationCriterion = new JobOutputVertex("Fake Tail for Termination Criterion", this.jobGraph);
- fakeTailTerminationCriterion.setOutputClass(FakeOutputTask.class);
+ fakeTailTerminationCriterion.setInvokableClass(FakeOutputTask.class);
fakeTailTerminationCriterion.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
this.auxVertices.add(fakeTailTerminationCriterion);
@@ -1309,7 +1299,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
{
final JobOutputVertex sync = new JobOutputVertex("Sync (" +
iterNode.getNodeName() + ")", this.jobGraph);
- sync.setOutputClass(IterationSynchronizationSinkTask.class);
+ sync.setInvokableClass(IterationSynchronizationSinkTask.class);
sync.setNumberOfSubtasks(1);
this.auxVertices.add(sync);
@@ -1367,14 +1357,14 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
worksetTailConfig.setIsWorksetUpdate();
if (hasWorksetTail) {
- nextWorksetVertex.setTaskClass(IterationTailPactTask.class);
+ nextWorksetVertex.setInvokableClass(IterationTailPactTask.class);
worksetTailConfig.setOutputSerializer(iterNode.getWorksetSerializer());
worksetTailConfig.addOutputShipStrategy(ShipStrategyType.FORWARD);
// create the fake output task
JobOutputVertex fakeTail = new JobOutputVertex("Fake Tail", this.jobGraph);
- fakeTail.setOutputClass(FakeOutputTask.class);
+ fakeTail.setInvokableClass(FakeOutputTask.class);
fakeTail.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
this.auxVertices.add(fakeTail);
@@ -1405,14 +1395,14 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
solutionDeltaConfig.setIsSolutionSetUpdate();
if (hasSolutionSetTail) {
- solutionDeltaVertex.setTaskClass(IterationTailPactTask.class);
+ solutionDeltaVertex.setInvokableClass(IterationTailPactTask.class);
solutionDeltaConfig.setOutputSerializer(iterNode.getSolutionSetSerializer());
solutionDeltaConfig.addOutputShipStrategy(ShipStrategyType.FORWARD);
// create the fake output task
JobOutputVertex fakeTail = new JobOutputVertex("Fake Tail", this.jobGraph);
- fakeTail.setOutputClass(FakeOutputTask.class);
+ fakeTail.setInvokableClass(FakeOutputTask.class);
fakeTail.setNumberOfSubtasks(headVertex.getNumberOfSubtasks());
this.auxVertices.add(fakeTail);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/api/common/PlanExecutor.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/PlanExecutor.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/PlanExecutor.java
index 7caaab2..d91abf8 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/PlanExecutor.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/PlanExecutor.java
@@ -53,7 +53,6 @@ public abstract class PlanExecutor {
* Creates an executor that runs the plan locally in a multi-threaded environment.
*
* @return A local executor.
- * @see eu.stratosphere.client.LocalExecutor
*/
public static PlanExecutor createLocalExecutor() {
Class<? extends PlanExecutor> leClass = loadExecutorClass(LOCAL_EXECUTOR_CLASS);
@@ -75,7 +74,6 @@ public abstract class PlanExecutor {
* @param jarFiles A list of jar files that contain the user-defined function (UDF) classes and all classes used
* from within the UDFs.
* @return A remote executor.
- * @see eu.stratosphere.client.RemoteExecutor
*/
public static PlanExecutor createRemoteExecutor(String hostname, int port, String... jarFiles) {
if (hostname == null) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
index c4e1d5a..d43c987 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
@@ -31,14 +31,14 @@ import eu.stratosphere.core.fs.Path;
* The abstract base class for all output formats that are file based. Contains the logic to open/close the target
* file streams.
*/
-public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
+public abstract class FileOutputFormat<IT> implements OutputFormat<IT>, InitializeOnMaster {
+
private static final long serialVersionUID = 1L;
// --------------------------------------------------------------------------------------------
/**
- * Defines the behavior for creating output directories.
- *
+ * Behavior for creating output directories.
*/
public static enum OutputDirectoryMode {
@@ -54,7 +54,7 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
private static WriteMode DEFAULT_WRITE_MODE;
- private static OutputDirectoryMode DEFAULT_OUTPUT_DIRECTORY_MODE;
+ private static OutputDirectoryMode DEFAULT_OUTPUT_DIRECTORY_MODE;
private static final void initDefaultsFromConfiguration() {
@@ -100,11 +100,6 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
*/
private OutputDirectoryMode outputDirectoryMode;
- /**
- * Stream opening timeout.
- */
- private long openTimeout = -1;
-
// --------------------------------------------------------------------------------------------
/**
@@ -158,19 +153,6 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
return this.outputDirectoryMode;
}
-
- public void setOpenTimeout(long timeout) {
- if (timeout < 0) {
- throw new IllegalArgumentException("The timeout must be a nonnegative numer of milliseconds (zero for infinite).");
- }
-
- this.openTimeout = (timeout == 0) ? Long.MAX_VALUE : timeout;
- }
-
- public long getOpenTimeout() {
- return this.openTimeout;
- }
-
// ----------------------------------------------------------------
@Override
@@ -200,34 +182,58 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
if (this.outputDirectoryMode == null) {
this.outputDirectoryMode = DEFAULT_OUTPUT_DIRECTORY_MODE;
}
-
- if (this.openTimeout == -1) {
- this.openTimeout = FileInputFormat.getDefaultOpeningTimeout();
- }
}
@Override
public void open(int taskNumber, int numTasks) throws IOException {
+ if (taskNumber < 0 || numTasks < 1) {
+ throw new IllegalArgumentException("TaskNumber: " + taskNumber + ", numTasks: " + numTasks);
+ }
if (LOG.isDebugEnabled()) {
- LOG.debug("Openint stream for output (" + (taskNumber+1) + "/" + numTasks + "). WriteMode=" + writeMode +
- ", OutputDirectoryMode=" + outputDirectoryMode + ", timeout=" + openTimeout);
+ LOG.debug("Opening stream for output (" + (taskNumber+1) + "/" + numTasks + "). WriteMode=" + writeMode +
+ ", OutputDirectoryMode=" + outputDirectoryMode);
}
- // obtain FSDataOutputStream asynchronously, since HDFS client is vulnerable to InterruptedExceptions
- OutputPathOpenThread opot = new OutputPathOpenThread(this, (taskNumber + 1), numTasks);
- opot.start();
+ Path p = this.outputFilePath;
+ if (p == null) {
+ throw new IOException("The file path is null.");
+ }
- try {
- // get FSDataOutputStream
- this.stream = opot.waitForCompletion();
+ final FileSystem fs = p.getFileSystem();
+
+ // if this is a local file system, we need to initialize the local output directory here
+ if (!fs.isDistributedFS()) {
+
+ if (numTasks == 1 && outputDirectoryMode == OutputDirectoryMode.PARONLY) {
+ // output should go to a single file
+
+ // prepare local output path. checks for write mode and removes existing files in case of OVERWRITE mode
+ if(!fs.initOutPathLocalFS(p, writeMode, false)) {
+ // output preparation failed! Cancel task.
+ throw new IOException("Output path could not be initialized. Canceling task...");
+ }
+ }
+ else {
+ // numTasks > 1 || outDirMode == OutputDirectoryMode.ALWAYS
+
+ if(!fs.initOutPathLocalFS(p, writeMode, true)) {
+ // output preparation failed! Cancel task.
+ throw new IOException("Output directory could not be created. Canceling task...");
+ }
+ }
}
- catch (Exception e) {
- throw new RuntimeException("Stream to output file could not be opened: " + e.getMessage(), e);
+
+
+ // Suffix the path with the parallel instance index, if needed
+ if (numTasks > 1 || outputDirectoryMode == OutputDirectoryMode.ALWAYS) {
+ p = p.suffix("/" + (taskNumber+1));
}
- }
+ // create output file
+ this.stream = fs.create(p, writeMode == WriteMode.OVERWRITE);
+ }
@Override
public void close() throws IOException {
@@ -238,153 +244,37 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
}
}
- // ============================================================================================
-
- private static final class OutputPathOpenThread extends Thread {
-
- private final Path path;
-
- private final int taskIndex;
-
- private final int numTasks;
-
- private final WriteMode writeMode;
-
- private final OutputDirectoryMode outDirMode;
-
- private final long timeoutMillies;
-
- private volatile FSDataOutputStream fdos;
-
- private volatile Throwable error;
-
- private volatile boolean aborted;
-
-
- public OutputPathOpenThread(FileOutputFormat<?> fof, int taskIndex, int numTasks) {
- this.path = fof.getOutputFilePath();
- this.writeMode = fof.getWriteMode();
- this.outDirMode = fof.getOutputDirectoryMode();
- this.timeoutMillies = fof.getOpenTimeout();
- this.taskIndex = taskIndex;
- this.numTasks = numTasks;
- }
-
- @Override
- public void run() {
-
- try {
- Path p = this.path;
- final FileSystem fs = p.getFileSystem();
-
- // initialize output path.
- if(this.numTasks == 1 && outDirMode == OutputDirectoryMode.PARONLY) {
- // output is not written in parallel and should go to a single file
-
- if(!fs.isDistributedFS()) {
- // prepare local output path
- // checks for write mode and removes existing files in case of OVERWRITE mode
- if(!fs.initOutPathLocalFS(p, writeMode, false)) {
- // output preparation failed! Cancel task.
- throw new IOException("Output path could not be initialized. Canceling task.");
- }
- }
-
- } else if(this.numTasks > 1 || outDirMode == OutputDirectoryMode.ALWAYS) {
- // output is written in parallel into a directory or should always be written to a directory
-
- if(!fs.isDistributedFS()) {
- // File system is not distributed.
- // We need to prepare the output path on each executing node.
- if(!fs.initOutPathLocalFS(p, writeMode, true)) {
- // output preparation failed! Cancel task.
- throw new IOException("Output directory could not be created. Canceling task.");
- }
- }
-
- // Suffix the path with the parallel instance index
- p = p.suffix("/" + this.taskIndex);
-
- } else {
- // invalid number of subtasks (<= 0)
- throw new IllegalArgumentException("Invalid number of subtasks. Canceling task.");
- }
-
- // create output file
- switch(writeMode) {
- case NO_OVERWRITE:
- this.fdos = fs.create(p, false);
- break;
- case OVERWRITE:
- this.fdos = fs.create(p, true);
- break;
- default:
- throw new IllegalArgumentException("Invalid write mode: "+writeMode);
- }
-
- // check for canceling and close the stream in that case, because no one will obtain it
- if (this.aborted) {
- final FSDataOutputStream f = this.fdos;
- this.fdos = null;
- f.close();
- }
- }
- catch (Throwable t) {
- this.error = t;
- }
- }
+ /**
+ * Initialization of the distributed file system if it is used.
+ *
+ * @param parallelism The task parallelism.
+ */
+ @Override
+ public void initializeGlobal(int parallelism) throws IOException {
+ final Path path = getOutputFilePath();
+ final FileSystem fs = path.getFileSystem();
- public FSDataOutputStream waitForCompletion() throws Exception {
- final long start = System.currentTimeMillis();
- long remaining = this.timeoutMillies;
+ // only distributed file systems can be initialized at start-up time.
+ if (fs.isDistributedFS()) {
- do {
- try {
- this.join(remaining);
- } catch (InterruptedException iex) {
- // we were canceled, so abort the procedure
- abortWait();
- throw iex;
+ final WriteMode writeMode = getWriteMode();
+ final OutputDirectoryMode outDirMode = getOutputDirectoryMode();
+
+ if (parallelism == 1 && outDirMode == OutputDirectoryMode.PARONLY) {
+ // output is not written in parallel and should be written to a single file.
+ // prepare distributed output path
+ if(!fs.initOutPathDistFS(path, writeMode, false)) {
+ // output preparation failed! Cancel task.
+ throw new IOException("Output path could not be initialized.");
}
- }
- while (this.error == null && this.fdos == null &&
- (remaining = this.timeoutMillies + start - System.currentTimeMillis()) > 0);
-
- if (this.error != null) {
- throw new IOException("Opening the file output stream failed" +
- (this.error.getMessage() == null ? "." : ": " + this.error.getMessage()), this.error);
- }
-
- if (this.fdos != null) {
- return this.fdos;
+
} else {
- // double-check that the stream has not been set by now. we don't know here whether
- // a) the opener thread recognized the canceling and closed the stream
- // b) the flag was set such that the stream did not see it and we have a valid stream
- // In any case, close the stream and throw an exception.
- abortWait();
-
- final boolean stillAlive = this.isAlive();
- final StringBuilder bld = new StringBuilder(256);
- for (StackTraceElement e : this.getStackTrace()) {
- bld.append("\tat ").append(e.toString()).append('\n');
+ // output should be written to a directory
+
+ // prepare the distributed output directory
+ if(!fs.initOutPathDistFS(path, writeMode, true)) {
+ throw new IOException("Output directory could not be created.");
}
- throw new IOException("Output opening request timed out. Opener was " + (stillAlive ? "" : "NOT ") +
- " alive. Stack:\n" + bld.toString());
- }
- }
-
- /**
- * Double checked procedure setting the abort flag and closing the stream.
- */
- private final void abortWait() {
- this.aborted = true;
- final FSDataOutputStream outStream = this.fdos;
- this.fdos = null;
- if (outStream != null) {
- try {
- outStream.close();
- } catch (Throwable t) {}
}
}
}
@@ -437,47 +327,4 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
super(targetConfig);
}
}
-
- /**
- * Initialization of the distributed file system if it is used.
- *
- * @param configuration The task configuration
- */
- @Override
- public void initialize(Configuration configuration){
- final Path path = this.getOutputFilePath();
- final WriteMode writeMode = this.getWriteMode();
- final OutputDirectoryMode outDirMode = this.getOutputDirectoryMode();
-
- // Prepare output path and determine max DOP
- try {
- final FileSystem fs = path.getFileSystem();
-
- int dop = configuration.getInteger(DEGREE_OF_PARALLELISM_KEY, -1);
- if(dop == 1 && outDirMode == OutputDirectoryMode.PARONLY) {
- // output is not written in parallel and should be written to a single file.
-
- if(fs.isDistributedFS()) {
- // prepare distributed output path
- if(!fs.initOutPathDistFS(path, writeMode, false)) {
- // output preparation failed! Cancel task.
- throw new IOException("Output path could not be initialized.");
- }
- }
- } else {
- // output should be written to a directory
-
- if(fs.isDistributedFS()) {
- // only distributed file systems can be initialized at start-up time.
- if(!fs.initOutPathDistFS(path, writeMode, true)) {
- throw new IOException("Output directory could not be created.");
- }
- }
- }
- }
- catch (IOException e) {
- LOG.error("Could not access the file system to detemine the status of the output.", e);
- throw new RuntimeException("I/O Error while accessing file", e);
- }
- }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FormatUtil.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FormatUtil.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FormatUtil.java
index f191c61..ec1033e 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FormatUtil.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FormatUtil.java
@@ -153,7 +153,6 @@ public class FormatUtil {
{
final F outputFormat = ReflectionUtil.newInstance(outputFormatClass);
outputFormat.setOutputFilePath(new Path(path));
- outputFormat.setOpenTimeout(0);
outputFormat.setWriteMode(WriteMode.OVERWRITE);
configuration = configuration == null ? new Configuration() : configuration;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/InitializeOnMaster.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/InitializeOnMaster.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/InitializeOnMaster.java
new file mode 100644
index 0000000..86fdee2
--- /dev/null
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/InitializeOnMaster.java
@@ -0,0 +1,35 @@
+/***********************************************************************************************************************
+ *
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ *
+ **********************************************************************************************************************/
+
+package eu.stratosphere.api.common.io;
+
+import java.io.IOException;
+
+/**
+ * This interface may be implemented by {@link OutputFormat}s to have the master initialize them globally.
+ *
+ * For example, the {@link FileOutputFormat} implements this behavior for distributed file systems and
+ * creates/deletes target directories if necessary.
+ */
+public interface InitializeOnMaster {
+
+ /**
+ * The method is invoked on the master (JobManager) before the distributed program execution starts.
+ *
+ * @param parallelism The degree of parallelism with which the format or functions will be run.
+ * @throws IOException The initialization may throw exceptions, which may cause the job to abort.
+ */
+ void initializeGlobal(int parallelism) throws IOException;
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
index bdc59e4..72dddf4 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
@@ -20,14 +20,13 @@ import eu.stratosphere.configuration.Configuration;
/**
- * Describes the base interface that is used describe an output that consumes records. The output format
+ * The base interface for outputs that consume records. The output format
* describes how to store the final records, for example in a file.
* <p>
* The life cycle of an output format is the following:
* <ol>
- * <li>After being instantiated (parameterless), it is configured with a {@link Configuration} object.
- * Basic fields are read from the configuration, such as for example a file path, if the format describes
- * files as the sink for the records.</li>
+ * <li>configure() is invoked a single time. The method can be used to implement initialization from
+ * the parameters (configuration) that may be attached upon instantiation.</li>
* <li>Each parallel output task creates an instance, configures it and opens it.</li>
* <li>All records of its parallel instance are handed to the output format.</li>
* <li>The output format is closed</li>
@@ -79,13 +78,5 @@ public interface OutputFormat<IT> extends Serializable {
* @throws IOException Thrown, if the input could not be closed properly.
*/
void close() throws IOException;
-
- /**
- * Method which is called on the JobManager node prior to execution. It can be used to set up output format
- * related tasks.
- *
- * @param configuration The task configuration
- */
- void initialize(Configuration configuration);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
index 451577f..6b9436b 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
@@ -34,17 +34,19 @@ import eu.stratosphere.core.io.StringRecord;
* This class is thread-safe.
*
*/
-public class Configuration implements IOReadableWritable {
+public class Configuration implements IOReadableWritable, java.io.Serializable {
+
+ private static final long serialVersionUID = 1L;
/**
* Stores the concrete key/value pairs of this configuration object.
*/
- private Map<String, String> confData = new HashMap<String, String>();
+ private final Map<String, String> confData = new HashMap<String, String>();
/**
* The class loader to be used for the <code>getClass</code> method.
*/
- private ClassLoader classLoader;
+ private transient ClassLoader classLoader;
/**
* Constructs a new configuration object.
@@ -446,7 +448,6 @@ public class Configuration implements IOReadableWritable {
// --------------------------------------------------------------------------------------------
-
@Override
public void read(final DataInput in) throws IOException {
@@ -479,6 +480,13 @@ public class Configuration implements IOReadableWritable {
}
}
}
+
+ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ this.classLoader = getClass().getClassLoader();
+ }
+
+ // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/core/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/core/fs/FileSystem.java b/stratosphere-core/src/main/java/eu/stratosphere/core/fs/FileSystem.java
index 11c7007..8e65636 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/core/fs/FileSystem.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/core/fs/FileSystem.java
@@ -459,7 +459,7 @@ public abstract class FileSystem {
// path exists, check write mode
switch (writeMode) {
case NO_OVERWRITE:
- if (status.isDir()) {
+ if (status.isDir() && createDirectory) {
return true;
} else {
// file may not be overwritten
@@ -467,6 +467,7 @@ public abstract class FileSystem {
WriteMode.NO_OVERWRITE.name() + " mode. Use " + WriteMode.OVERWRITE.name() +
" mode to overwrite existing files and directories.");
}
+
case OVERWRITE:
if (status.isDir()) {
if (createDirectory) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/core/io/StringRecord.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/core/io/StringRecord.java b/stratosphere-core/src/main/java/eu/stratosphere/core/io/StringRecord.java
index 50c2599..de2358b 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/core/io/StringRecord.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/core/io/StringRecord.java
@@ -34,6 +34,8 @@ import java.text.CharacterIterator;
import java.text.StringCharacterIterator;
import java.util.Arrays;
+import eu.stratosphere.types.Value;
+
/**
* This class stores text using standard UTF8 encoding. It provides methods to
* serialize, deserialize, and compare texts at byte level. The type of length
@@ -44,7 +46,9 @@ import java.util.Arrays;
* Also includes utilities for serializing/deserialing a string, coding/decoding a string, checking if a byte array
* contains valid UTF8 code, calculating the length of an encoded string.
*/
-public class StringRecord implements IOReadableWritable {
+public class StringRecord implements Value {
+
+ private static final long serialVersionUID = 1L;
private static final ThreadLocal<CharsetEncoder> ENCODER_FACTORY = new ThreadLocal<CharsetEncoder>() {
protected CharsetEncoder initialValue() {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-core/src/main/java/eu/stratosphere/util/IterableIterator.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/util/IterableIterator.java b/stratosphere-core/src/main/java/eu/stratosphere/util/IterableIterator.java
index 16f610a..b59e2e6 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/util/IterableIterator.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/util/IterableIterator.java
@@ -18,9 +18,9 @@ package eu.stratosphere.util;
import java.util.Iterator;
/**
- * An {@link Iterator] that is also {@link Iterable} (often by returning itself).
+ * An {@link Iterator} that is also {@link Iterable} (often by returning itself).
*
- * @param <T> The iterated elements' type.
+ * @param <E> The iterated elements' type.
*/
public interface IterableIterator<E> extends Iterator<E>, Iterable<E> {
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java b/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
index d1736d4..5c09439 100644
--- a/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
+++ b/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
@@ -95,7 +95,4 @@ public class PrintingOutputFormat<T> implements OutputFormat<T> {
public String toString() {
return "Print to " + (target == STD_OUT ? "System.out" : "System.err");
}
-
- @Override
- public void initialize(Configuration configuration){}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
index 2416b07..ae5198a 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
@@ -43,7 +43,6 @@ import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Collections;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
index 1c4a820..18395fb 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
@@ -31,11 +31,13 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import eu.stratosphere.api.common.io.InitializeOnMaster;
+import eu.stratosphere.api.common.io.OutputFormat;
import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.configuration.IllegalConfigurationException;
import eu.stratosphere.core.io.InputSplit;
import eu.stratosphere.nephele.execution.ExecutionListener;
import eu.stratosphere.nephele.execution.ExecutionState;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
import eu.stratosphere.nephele.instance.AllocatedResource;
import eu.stratosphere.nephele.instance.DummyInstance;
import eu.stratosphere.nephele.jobgraph.DistributionPattern;
@@ -45,11 +47,11 @@ import eu.stratosphere.runtime.io.channels.ChannelType;
import eu.stratosphere.nephele.jobgraph.AbstractJobInputVertex;
import eu.stratosphere.nephele.jobgraph.AbstractJobVertex;
import eu.stratosphere.nephele.jobgraph.JobEdge;
-import eu.stratosphere.nephele.jobgraph.JobFileOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobID;
+import eu.stratosphere.nephele.jobgraph.JobInputVertex;
+import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
import eu.stratosphere.nephele.taskmanager.ExecutorThreadFactory;
-import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.util.StringUtils;
@@ -462,42 +464,68 @@ public class ExecutionGraph implements ExecutionListener {
: false, jobVertex.getNumberOfExecutionRetries(), jobVertex.getConfiguration(), signature,
invokableClass);
} catch (Throwable t) {
- throw new GraphConversionException(StringUtils.stringifyException(t));
+ throw new GraphConversionException(t);
}
// Register input and output vertices separately
if (jobVertex instanceof AbstractJobInputVertex) {
- final InputSplit[] inputSplits;
-
+ final AbstractJobInputVertex jobInputVertex = (AbstractJobInputVertex) jobVertex;
+
+ if (jobVertex instanceof JobInputVertex) {
+ try {
+ // get a handle to the user code class loader
+ ClassLoader cl = LibraryCacheManager.getClassLoader(jobVertex.getJobGraph().getJobID());
+
+ ((JobInputVertex) jobVertex).initializeInputFormatFromTaskConfig(cl);
+ }
+ catch (Throwable t) {
+ throw new GraphConversionException("Could not deserialize input format.", t);
+ }
+ }
+
final Class<? extends InputSplit> inputSplitType = jobInputVertex.getInputSplitType();
+
+ InputSplit[] inputSplits;
- try{
+ try {
inputSplits = jobInputVertex.getInputSplits(jobVertex.getNumberOfSubtasks());
- }catch(Exception e) {
- throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName() + ": "
- + StringUtils.stringifyException(e));
+ }
+ catch (Throwable t) {
+ throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName(), t);
}
if (inputSplits == null) {
- LOG.info("Job input vertex " + jobVertex.getName() + " generated 0 input splits");
- } else {
- LOG.info("Job input vertex " + jobVertex.getName() + " generated " + inputSplits.length
- + " input splits");
+ inputSplits = new InputSplit[0];
}
+
+ LOG.info("Job input vertex " + jobVertex.getName() + " generated " + inputSplits.length + " input splits");
// assign input splits and type
groupVertex.setInputSplits(inputSplits);
groupVertex.setInputSplitType(inputSplitType);
}
- if(jobVertex instanceof JobOutputVertex){
+ if (jobVertex instanceof JobOutputVertex){
final JobOutputVertex jobOutputVertex = (JobOutputVertex) jobVertex;
+
+ try {
+ // get a handle to the user code class loader
+ ClassLoader cl = LibraryCacheManager.getClassLoader(jobVertex.getJobGraph().getJobID());
+ jobOutputVertex.initializeOutputFormatFromTaskConfig(cl);
+ }
+ catch (Throwable t) {
+ throw new GraphConversionException("Could not deserialize output format.", t);
+ }
- final OutputFormat<?> outputFormat = jobOutputVertex.getOutputFormat();
-
- if(outputFormat != null){
- outputFormat.initialize(groupVertex.getConfiguration());
+ OutputFormat<?> outputFormat = jobOutputVertex.getOutputFormat();
+ if (outputFormat != null && outputFormat instanceof InitializeOnMaster){
+ try {
+ ((InitializeOnMaster) outputFormat).initializeGlobal(jobVertex.getNumberOfSubtasks());
+ }
+ catch (Throwable t) {
+ throw new GraphConversionException(t);
+ }
}
}
@@ -519,7 +547,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return the number of input vertices registered with this execution graph
*/
public int getNumberOfInputVertices() {
-
return this.stages.get(0).getNumberOfInputExecutionVertices();
}
@@ -531,7 +558,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return the number of input vertices for the given stage
*/
public int getNumberOfInputVertices(int stage) {
-
if (stage >= this.stages.size()) {
return 0;
}
@@ -545,7 +571,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return the number of output vertices registered with this execution graph
*/
public int getNumberOfOutputVertices() {
-
return this.stages.get(0).getNumberOfOutputExecutionVertices();
}
@@ -557,7 +582,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return the number of input vertices for the given stage
*/
public int getNumberOfOutputVertices(final int stage) {
-
if (stage >= this.stages.size()) {
return 0;
}
@@ -574,7 +598,6 @@ public class ExecutionGraph implements ExecutionListener {
* exists
*/
public ExecutionVertex getInputVertex(final int index) {
-
return this.stages.get(0).getInputExecutionVertex(index);
}
@@ -587,7 +610,6 @@ public class ExecutionGraph implements ExecutionListener {
* exists
*/
public ExecutionVertex getOutputVertex(final int index) {
-
return this.stages.get(0).getOutputExecutionVertex(index);
}
@@ -602,7 +624,6 @@ public class ExecutionGraph implements ExecutionListener {
* exists in that stage
*/
public ExecutionVertex getInputVertex(final int stage, final int index) {
-
try {
final ExecutionStage s = this.stages.get(stage);
if (s == null) {
@@ -627,7 +648,6 @@ public class ExecutionGraph implements ExecutionListener {
* exists in that stage
*/
public ExecutionVertex getOutputVertex(final int stage, final int index) {
-
try {
final ExecutionStage s = this.stages.get(stage);
if (s == null) {
@@ -649,7 +669,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return the execution stage with number <code>num</code> or <code>null</code> if no such execution stage exists
*/
public ExecutionStage getStage(final int num) {
-
try {
return this.stages.get(num);
} catch (ArrayIndexOutOfBoundsException e) {
@@ -663,7 +682,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return the number of execution stages in the execution graph
*/
public int getNumberOfStages() {
-
return this.stages.size();
}
@@ -676,7 +694,6 @@ public class ExecutionGraph implements ExecutionListener {
* exists in the execution graph
*/
public ExecutionVertex getVertexByChannelID(final ChannelID id) {
-
final ExecutionEdge edge = this.edgeMap.get(id);
if (edge == null) {
return null;
@@ -697,7 +714,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return the edge whose ID matches <code>id</code> or <code>null</code> if no such edge is known
*/
public ExecutionEdge getEdgeByID(final ChannelID id) {
-
return this.edgeMap.get(id);
}
@@ -708,7 +724,6 @@ public class ExecutionGraph implements ExecutionListener {
* the execution vertex to register
*/
void registerExecutionVertex(final ExecutionVertex vertex) {
-
if (this.vertexMap.put(vertex.getID(), vertex) != null) {
throw new IllegalStateException("There is already an execution vertex with ID " + vertex.getID()
+ " registered");
@@ -724,7 +739,6 @@ public class ExecutionGraph implements ExecutionListener {
* found
*/
public ExecutionVertex getVertexByID(final ExecutionVertexID id) {
-
return this.vertexMap.get(id);
}
@@ -735,7 +749,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return <code>true</code> if stage is completed, <code>false</code> otherwise
*/
private boolean isCurrentStageCompleted() {
-
if (this.indexToCurrentExecutionStage >= this.stages.size()) {
return true;
}
@@ -758,7 +771,6 @@ public class ExecutionGraph implements ExecutionListener {
* @return <code>true</code> if the execution of the graph is finished, <code>false</code> otherwise
*/
public boolean isExecutionFinished() {
-
return (getJobStatus() == InternalJobStatus.FINISHED);
}
@@ -1307,4 +1319,26 @@ public class ExecutionGraph implements ExecutionListener {
}
}
}
+
+ /**
+ * Retrieves the number of required slots to run this execution graph
+ * @return
+ */
+ public int getRequiredSlots(){
+ int maxRequiredSlots = 0;
+
+ final Iterator<ExecutionStage> stageIterator = this.stages.iterator();
+
+ while(stageIterator.hasNext()){
+ final ExecutionStage stage = stageIterator.next();
+
+ int requiredSlots = stage.getRequiredSlots();
+
+ if(requiredSlots > maxRequiredSlots){
+ maxRequiredSlots = requiredSlots;
+ }
+ }
+
+ return maxRequiredSlots;
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
index 91e9e53..dceeb90 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
@@ -15,7 +15,6 @@ package eu.stratosphere.nephele.executiongraph;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.io.InputSplit;
-import eu.stratosphere.nephele.execution.RuntimeEnvironment;
import eu.stratosphere.nephele.instance.AllocatedResource;
import eu.stratosphere.nephele.instance.DummyInstance;
import eu.stratosphere.nephele.jobgraph.JobVertexID;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
index e4d3b9d..b901742 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
@@ -15,11 +15,8 @@ package eu.stratosphere.nephele.jobgraph;
import eu.stratosphere.core.io.InputSplit;
-import java.io.IOException;
-
/**
- * An abstract base class for input vertices in Nephele.
- *
+ * An abstract base class for input vertices.
*/
public abstract class AbstractJobInputVertex extends AbstractJobVertex {
@@ -28,12 +25,24 @@ public abstract class AbstractJobInputVertex extends AbstractJobVertex {
*
* @param name
* the name of the new job input vertex
+ * @param jobGraph
+ * the job graph this vertex belongs to
+ */
+ protected AbstractJobInputVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
+ }
+
+ /**
+ * Constructs a new job input vertex with the given name.
+ *
+ * @param name
+ * the name of the new job input vertex
* @param id
* the ID of this vertex
* @param jobGraph
* the job graph this vertex belongs to
*/
- protected AbstractJobInputVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
+ protected AbstractJobInputVertex(String name, JobVertexID id, JobGraph jobGraph) {
super(name, id, jobGraph);
jobGraph.addVertex(this);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobOutputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobOutputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobOutputVertex.java
index 849df4b..6020f24 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobOutputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobOutputVertex.java
@@ -24,14 +24,15 @@ public abstract class AbstractJobOutputVertex extends AbstractJobVertex {
*
* @param name
* the name of the new job output vertex
- * @param id
- * the ID of this vertex
* @param jobGraph
* the job graph this vertex belongs to
*/
- protected AbstractJobOutputVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
+ protected AbstractJobOutputVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
+ }
+
+ protected AbstractJobOutputVertex(String name, JobVertexID id, JobGraph jobGraph) {
super(name, id, jobGraph);
-
jobGraph.addVertex(this);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
index cdadd3c..cc936d9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
@@ -18,8 +18,9 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
+import org.apache.commons.lang.Validate;
+
import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.configuration.IllegalConfigurationException;
import eu.stratosphere.core.io.IOReadableWritable;
import eu.stratosphere.core.io.StringRecord;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
@@ -29,8 +30,7 @@ import eu.stratosphere.nephele.util.EnumUtils;
import eu.stratosphere.util.StringUtils;
/**
- * An abstract base class for a job vertex in Nephele.
- *
+ * An abstract base class for a job vertex.
*/
public abstract class AbstractJobVertex implements IOReadableWritable {
@@ -86,19 +86,30 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
*/
protected Class<? extends AbstractInvokable> invokableClass = null;
+
/**
* Constructs a new job vertex and assigns it with the given name.
*
* @param name
* the name of the new job vertex
- * @param id
- * the ID of this vertex
* @param jobGraph
* the job graph this vertex belongs to
*/
- protected AbstractJobVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
+ protected AbstractJobVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
+ }
+
+ /**
+ * Constructs a new job vertex and assigns it with the given name.
+ *
+ * @param name
+ * the name of the new job vertex
+ * @param jobGraph
+ * the job graph this vertex belongs to
+ */
+ protected AbstractJobVertex(String name, JobVertexID id, JobGraph jobGraph) {
this.name = name == null ? DEFAULT_NAME : name;
- this.id = (id == null) ? new JobVertexID() : id;
+ this.id = id == null ? new JobVertexID() : id;
this.jobGraph = jobGraph;
}
@@ -572,13 +583,17 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
return this.configuration;
}
+ public void setInvokableClass(Class<? extends AbstractInvokable> invokable) {
+ Validate.notNull(invokable);
+ this.invokableClass = invokable;
+ }
+
/**
* Returns the invokable class which represents the task of this vertex
*
* @return the invokable class, <code>null</code> if it is not set
*/
public Class<? extends AbstractInvokable> getInvokableClass() {
-
return this.invokableClass;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
index f048b0d..3d14d0a 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
@@ -26,8 +26,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.Stack;
-import java.util.Vector;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.fs.FSDataInputStream;
@@ -77,11 +75,6 @@ public class JobGraph implements IOReadableWritable {
private Configuration jobConfiguration = new Configuration();
/**
- * The configuration which should be applied to the task managers involved in processing this job.
- */
- private final Configuration taskManagerConfiguration = new Configuration();
-
- /**
* List of JAR files required to run this job.
*/
private final ArrayList<Path> userJars = new ArrayList<Path>();
@@ -134,24 +127,12 @@ public class JobGraph implements IOReadableWritable {
}
/**
- * Returns the configuration object distributed among the task managers
- * before they start processing this job.
- *
- * @return the configuration object for the task managers, or <code>null</code> if it is not set
- */
- public Configuration getTaskmanagerConfiguration() {
-
- return this.taskManagerConfiguration;
- }
-
- /**
* Adds a new input vertex to the job graph if it is not already included.
*
* @param inputVertex
* the new input vertex to be added
*/
- public void addVertex(final AbstractJobInputVertex inputVertex) {
-
+ public void addVertex(AbstractJobInputVertex inputVertex) {
if (!inputVertices.containsKey(inputVertex.getID())) {
inputVertices.put(inputVertex.getID(), inputVertex);
}
@@ -163,8 +144,7 @@ public class JobGraph implements IOReadableWritable {
* @param taskVertex
* the new task vertex to be added
*/
- public void addVertex(final JobTaskVertex taskVertex) {
-
+ public void addVertex(JobTaskVertex taskVertex) {
if (!taskVertices.containsKey(taskVertex.getID())) {
taskVertices.put(taskVertex.getID(), taskVertex);
}
@@ -176,8 +156,7 @@ public class JobGraph implements IOReadableWritable {
* @param outputVertex
* the new output vertex to be added
*/
- public void addVertex(final AbstractJobOutputVertex outputVertex) {
-
+ public void addVertex(AbstractJobOutputVertex outputVertex) {
if (!outputVertices.containsKey(outputVertex.getID())) {
outputVertices.put(outputVertex.getID(), outputVertex);
}
@@ -570,9 +549,6 @@ public class JobGraph implements IOReadableWritable {
// Re-instantiate the job configuration object and read the configuration
this.jobConfiguration = new Configuration(cl);
this.jobConfiguration.read(in);
-
- // Read the task manager configuration
- this.taskManagerConfiguration.read(in);
}
@@ -610,7 +586,6 @@ public class JobGraph implements IOReadableWritable {
// Write out configuration objects
this.jobConfiguration.write(out);
- this.taskManagerConfiguration.write(out);
}
/**
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
index 29f98d9..bf8f544 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
@@ -13,41 +13,21 @@
package eu.stratosphere.nephele.jobgraph;
+import java.io.IOException;
+
import eu.stratosphere.api.common.io.InputFormat;
-import eu.stratosphere.api.common.operators.util.UserCodeObjectWrapper;
import eu.stratosphere.api.common.operators.util.UserCodeWrapper;
-import eu.stratosphere.api.common.typeutils.TypeSerializerFactory;
-import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.io.InputSplit;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.pact.runtime.task.util.TaskConfig;
-import java.io.DataInput;
-import java.io.IOException;
-
public class JobInputVertex extends AbstractJobInputVertex {
- /**
- * Input format associated to this JobInputVertex. It is either directly set or reconstructed from the task
- * configuration. Every job input vertex requires an input format to compute the input splits and the input split
- * type.
- */
- private volatile InputFormat<?, ? extends InputSplit> inputFormat = null;
- /**
- * Creates a new job input vertex with the specified name.
- *
- * @param name
- * The name of the new job file input vertex.
- * @param id
- * The ID of this vertex.
- * @param jobGraph
- * The job graph this vertex belongs to.
- */
- public JobInputVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
+ private InputFormat<?, ?> inputFormat;
+
+ public JobInputVertex(String name, JobVertexID id, JobGraph jobGraph) {
super(name, id, jobGraph);
}
-
+
/**
* Creates a new job file input vertex with the specified name.
*
@@ -56,8 +36,8 @@ public class JobInputVertex extends AbstractJobInputVertex {
* @param jobGraph
* The job graph this vertex belongs to.
*/
- public JobInputVertex(final String name, final JobGraph jobGraph) {
- super(name, null, jobGraph);
+ public JobInputVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
}
/**
@@ -66,112 +46,23 @@ public class JobInputVertex extends AbstractJobInputVertex {
* @param jobGraph
* The job graph this vertex belongs to.
*/
- public JobInputVertex(final JobGraph jobGraph) {
- super(null, null, jobGraph);
- }
-
- /**
- * Sets the class of the vertex's input task.
- *
- * @param inputClass
- * The class of the vertex's input task.
- */
- public void setInputClass(final Class<? extends AbstractInputTask<?>> inputClass) {
- this.invokableClass = inputClass;
- }
-
- /**
- * Returns the class of the vertex's input task.
- *
- * @return the class of the vertex's input task or <code>null</code> if no task has yet been set
- */
- @SuppressWarnings("unchecked")
- public Class<? extends AbstractInputTask<?>> getInputClass() {
- return (Class<? extends AbstractInputTask<?>>) this.invokableClass;
- }
-
- /**
- * Sets the input format and writes it to the task configuration. It extracts it from the UserCodeWrapper.
- *
- * @param inputFormatWrapper Wrapped input format
- */
- public void setInputFormat(UserCodeWrapper<? extends InputFormat<?, ? extends InputSplit>> inputFormatWrapper) {
- TaskConfig config = new TaskConfig(this.getConfiguration());
- config.setStubWrapper(inputFormatWrapper);
-
- inputFormat = inputFormatWrapper.getUserCodeObject();
- }
-
- /**
- * Sets the input format and writes it to the task configuration.
- *
- * @param inputFormat Input format
- */
- public void setInputFormat(InputFormat<?, ? extends InputSplit> inputFormat) {
- this.inputFormat = inputFormat;
-
- UserCodeWrapper<? extends InputFormat<?, ? extends InputSplit>> wrapper = new
- UserCodeObjectWrapper<InputFormat<?, ? extends InputSplit>>(inputFormat);
- TaskConfig config = new TaskConfig(this.getConfiguration());
- config.setStubWrapper(wrapper);
+ public JobInputVertex(JobGraph jobGraph) {
+ this(null, jobGraph);
}
-
- /**
- * Sets the input format parameters.
- *
- * @param inputFormatParameters Input format parameters
- */
- public void setInputFormatParameters(Configuration inputFormatParameters){
- TaskConfig config = new TaskConfig(this.getConfiguration());
- config.setStubParameters(inputFormatParameters);
-
- if(inputFormat == null){
- throw new RuntimeException("There is no input format set in job vertex: " + this.getID());
- }
-
- inputFormat.configure(inputFormatParameters);
+
+ public void setInputFormat(InputFormat<?, ?> format) {
+ this.inputFormat = format;
}
-
- /**
- * Sets the output serializer for the task associated to this vertex.
- *
- * @param factory Type serializer factory
- */
- public void setOutputSerializer(TypeSerializerFactory<?> factory){
- TaskConfig config = new TaskConfig(this.getConfiguration());
- config.setOutputSerializer(factory);
- }
-
- /**
- * Deserializes the input format from the deserialized task configuration. It then configures the input format by
- * calling the configure method with the current configuration.
- *
- * @param input
- * @throws IOException
- */
- @Override
- public void read(final DataInput input) throws IOException{
- super.read(input);
-
- // load input format wrapper from the config
- ClassLoader cl = null;
-
- try{
- cl = LibraryCacheManager.getClassLoader(this.getJobGraph().getJobID());
+
+ public void initializeInputFormatFromTaskConfig(ClassLoader cl) {
+ TaskConfig cfg = new TaskConfig(getConfiguration());
+
+ UserCodeWrapper<InputFormat<?, ?>> wrapper = cfg.<InputFormat<?, ?>>getStubWrapper(cl);
+
+ if (wrapper != null) {
+ this.inputFormat = wrapper.getUserCodeObject(InputFormat.class, cl);
+ this.inputFormat.configure(cfg.getStubParameters());
}
- catch (IOException ioe) {
- throw new RuntimeException("Usercode ClassLoader could not be obtained for job: " +
- this.getJobGraph().getJobID(), ioe);
- }
-
- final Configuration config = this.getConfiguration();
- config.setClassLoader(cl);
- final TaskConfig taskConfig = new TaskConfig(config);
-
- inputFormat = taskConfig.<InputFormat<?, InputSplit>>getStubWrapper(cl).getUserCodeObject(InputFormat.class,
- cl);
-
- inputFormat.configure(taskConfig.getStubParameters());
}
/**
@@ -197,7 +88,7 @@ public class JobInputVertex extends AbstractJobInputVertex {
*/
@Override
public InputSplit[] getInputSplits(int minNumSplits) throws IOException {
- if(inputFormat == null){
+ if (inputFormat == null){
throw new RuntimeException("No input format has been set for job vertex: "+ this.getID());
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
index cf937a0..abe6be9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
@@ -14,41 +14,20 @@
package eu.stratosphere.nephele.jobgraph;
import eu.stratosphere.api.common.io.OutputFormat;
-import eu.stratosphere.api.common.operators.util.UserCodeObjectWrapper;
import eu.stratosphere.api.common.operators.util.UserCodeWrapper;
-import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
import eu.stratosphere.pact.runtime.task.util.TaskConfig;
-import java.io.DataInput;
-import java.io.IOException;
-
/**
- * A JobOutputVertex is a specific subtype of a {@link AbstractJobOutputVertex} and is designed
+ * A JobOutputVertex is a specific sub-type of a {@link AbstractJobOutputVertex} and is designed
* for Nephele tasks which sink data in a not further specified way. As every job output vertex,
* a JobOutputVertex must not have any further output.
- *
*/
public class JobOutputVertex extends AbstractJobOutputVertex {
/**
* Contains the output format associated to this output vertex. It can be <pre>null</pre>.
*/
- private volatile OutputFormat<?> outputFormat = null;
+ private OutputFormat<?> outputFormat;
- /**
- * Creates a new job file output vertex with the specified name.
- *
- * @param name
- * the name of the new job file output vertex
- * @param id
- * the ID of this vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobOutputVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
- super(name, id, jobGraph);
- }
/**
* Creates a new job file output vertex with the specified name.
@@ -58,8 +37,12 @@ public class JobOutputVertex extends AbstractJobOutputVertex {
* @param jobGraph
* the job graph this vertex belongs to
*/
- public JobOutputVertex(final String name, final JobGraph jobGraph) {
- super(name, null, jobGraph);
+ public JobOutputVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
+ }
+
+ public JobOutputVertex(String name, JobVertexID id, JobGraph jobGraph) {
+ super(name, id, jobGraph);
}
/**
@@ -68,94 +51,21 @@ public class JobOutputVertex extends AbstractJobOutputVertex {
* @param jobGraph
* the job graph this vertex belongs to
*/
- public JobOutputVertex(final JobGraph jobGraph) {
- super(null, null, jobGraph);
- }
-
- /**
- * Sets the class of the vertex's output task.
- *
- * @param outputClass
- * The class of the vertex's output task.
- */
- public void setOutputClass(final Class<? extends AbstractOutputTask> outputClass) {
- this.invokableClass = outputClass;
+ public JobOutputVertex(JobGraph jobGraph) {
+ this(null, jobGraph);
}
-
- /**
- * Returns the class of the vertex's output task.
- *
- * @return The class of the vertex's output task or <code>null</code> if no task has yet been set.
- */
- @SuppressWarnings("unchecked")
- public Class<? extends AbstractOutputTask> getOutputClass() {
- return (Class<? extends AbstractOutputTask>) this.invokableClass;
+
+ public void setOutputFormat(OutputFormat<?> format) {
+ this.outputFormat = format;
}
-
- /**
- * Sets the output format and writes it to the task configuration.
- *
- * @param outputFormatWrapper Wrapped output format
- */
- public void setOutputFormat(UserCodeWrapper<? extends OutputFormat<?>> outputFormatWrapper){
- TaskConfig config = new TaskConfig(this.getConfiguration());
- config.setStubWrapper(outputFormatWrapper);
- outputFormat = outputFormatWrapper.getUserCodeObject();
- }
-
- /**
- * Sets the output format and writes it to the task configuration.
- *
- * @param outputFormat Output format
- */
- public void setOutputFormat(OutputFormat<?> outputFormat){
- this.outputFormat = outputFormat;
- UserCodeWrapper<? extends OutputFormat<?>> wrapper = new UserCodeObjectWrapper<OutputFormat<?>>
- (outputFormat);
- TaskConfig config = new TaskConfig(this.getConfiguration());
- config.setStubWrapper(wrapper);
- }
-
- /**
- * Sets the output format parameters for the output format by writing it to the task configuration.
- *
- * @param parameters Output format parameters
- */
- public void setOutputFormatParameters(Configuration parameters){
- TaskConfig config = new TaskConfig(this.getConfiguration());
- config.setStubParameters(parameters);
-
- outputFormat.configure(parameters);
- }
-
- /**
- * Deserializes the output format from the deserialized configuration if it contains an output format. The output
- * format is always stored in the stub wrapper. If the task configuration contains an output format,
- * then it is configured after deserialization.
- *
- * @param input
- * @throws IOException
- */
- @Override
- public void read(final DataInput input) throws IOException{
- super.read(input);
-
- ClassLoader cl = null;
- try{
- cl = LibraryCacheManager.getClassLoader(this.getJobGraph().getJobID());
- }
- catch (IOException ioe) {
- throw new RuntimeException("Usercode ClassLoader could not be obtained for job: " +
- this.getJobGraph().getJobID(), ioe);
- }
-
- final Configuration config = this.getConfiguration();
- config.setClassLoader(cl);
- final TaskConfig taskConfig = new TaskConfig(config);
-
- if(taskConfig.hasStubWrapper()){
- outputFormat = taskConfig.<OutputFormat<?> >getStubWrapper(cl).getUserCodeObject(OutputFormat.class,cl);
- outputFormat.configure(taskConfig.getStubParameters());
+
+ public void initializeOutputFormatFromTaskConfig(ClassLoader cl) {
+ TaskConfig cfg = new TaskConfig(getConfiguration());
+ UserCodeWrapper<OutputFormat<?>> wrapper = cfg.<OutputFormat<?>>getStubWrapper(cl);
+
+ if (wrapper != null) {
+ this.outputFormat = wrapper.getUserCodeObject(OutputFormat.class, cl);
+ this.outputFormat.configure(cfg.getStubParameters());
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
index 8672aeb..d16286c 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
@@ -13,8 +13,6 @@
package eu.stratosphere.nephele.jobgraph;
-import eu.stratosphere.nephele.template.AbstractTask;
-
/**
* A JobTaskVertex is the vertex type for regular tasks (with both input and output) in Nephele.
* Tasks running inside a JobTaskVertex must specify at least one record reader and one record writer.
@@ -27,28 +25,15 @@ public class JobTaskVertex extends AbstractJobVertex {
*
* @param name
* the name for the new job task vertex
- * @param id
- * the ID of this vertex
* @param jobGraph
* the job graph this vertex belongs to
*/
- public JobTaskVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
- super(name, id, jobGraph);
-
- jobGraph.addVertex(this);
+ public JobTaskVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
}
-
- /**
- * Creates a new job task vertex with the specified name.
- *
- * @param name
- * the name for the new job task vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobTaskVertex(final String name, final JobGraph jobGraph) {
- super(name, null, jobGraph);
-
+
+ public JobTaskVertex(String name, JobVertexID id, JobGraph jobGraph) {
+ super(name, id, jobGraph);
jobGraph.addVertex(this);
}
@@ -58,29 +43,7 @@ public class JobTaskVertex extends AbstractJobVertex {
* @param jobGraph
* the job graph this vertex belongs to
*/
- public JobTaskVertex(final JobGraph jobGraph) {
- super(null, null, jobGraph);
-
- jobGraph.addVertex(this);
- }
-
- /**
- * Sets the class of the vertex's task.
- *
- * @param taskClass
- * the class of the vertex's task
- */
- public void setTaskClass(final Class<? extends AbstractTask> taskClass) {
- this.invokableClass = taskClass;
- }
-
- /**
- * Returns the class of the vertex's task.
- *
- * @return the class of the vertex's task or <code>null</code> if the class has not yet been set
- */
- @SuppressWarnings("unchecked")
- public Class<? extends AbstractTask> getTaskClass() {
- return (Class<? extends AbstractTask>) this.invokableClass;
+ public JobTaskVertex(JobGraph jobGraph) {
+ this(null, jobGraph);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
index 8a3cba4..f3cf3a3 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
@@ -393,9 +393,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
try {
// First check if job is null
if (job == null) {
- JobSubmissionResult result = new JobSubmissionResult(AbstractJobResult.ReturnCode.ERROR,
- "Submitted job is null!");
- return result;
+ return new JobSubmissionResult(AbstractJobResult.ReturnCode.ERROR, "Submitted job is null!");
}
if (LOG.isDebugEnabled()) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
index 790aca9..da63bf2 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
@@ -31,8 +31,6 @@ import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertexIterator;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.jobmanager.splitassigner.file.FileInputSplitAssigner;
-import eu.stratosphere.nephele.template.AbstractInputTask;
-import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.util.StringUtils;
/**
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
index 1e6929d..dc52911 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
@@ -24,8 +24,6 @@ import eu.stratosphere.core.io.InputSplit;
import eu.stratosphere.core.io.LocatableInputSplit;
import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.template.AbstractInputTask;
-import eu.stratosphere.nephele.template.AbstractInvokable;
/**
* The locatable input split assigner is a specific implementation of the {@link InputSplitAssigner} interface for
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
index 048562c..3580fda 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
@@ -25,8 +25,6 @@ import eu.stratosphere.core.io.InputSplit;
import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
import eu.stratosphere.nephele.jobmanager.splitassigner.InputSplitAssigner;
-import eu.stratosphere.nephele.template.AbstractInputTask;
-import eu.stratosphere.nephele.template.AbstractInvokable;
/**
* The file input split assigner is a specific implementation of the {@link InputSplitAssigner} interface for
@@ -89,14 +87,11 @@ public final class FileInputSplitAssigner implements InputSplitAssigner {
}
}
-
@Override
public void unregisterGroupVertex(final ExecutionGroupVertex groupVertex) {
-
this.vertexMap.remove(groupVertex);
}
-
@Override
public InputSplit getNextInputSplit(final ExecutionVertex vertex) {
[12/22] git commit: Added failing behavior to JobManager if it is
still in scheduled or created state New test case: Exception during
ExecutionGraph construction in the JobManager Removed checkConfiguration
method from AbstractJobVertex because it is not
Posted by se...@apache.org.
Added failing behavior to JobManager if it is still in scheduled or created state
New test case: Exception during ExecutionGraph construction in the JobManager
Removed checkConfiguration method from AbstractJobVertex because it is not needed anymore
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/93bc0b9b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/93bc0b9b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/93bc0b9b
Branch: refs/heads/master
Commit: 93bc0b9bf281490610d51f086eb0df781f207876
Parents: ea79186
Author: Till Rohrmann <ti...@mailbox.tu-berlin.de>
Authored: Thu Mar 27 03:01:22 2014 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:20 2014 +0200
----------------------------------------------------------------------
.../plantranslate/NepheleJobGraphGenerator.java | 3 +-
.../api/common/io/FileOutputFormat.java | 5 +
.../api/common/io/OutputFormat.java | 3 +-
.../configuration/Configuration.java | 4 +-
.../nephele/executiongraph/ExecutionGraph.java | 11 +-
.../jobgraph/AbstractJobInputVertex.java | 13 +++
.../nephele/jobgraph/AbstractJobVertex.java | 12 --
.../nephele/jobgraph/JobInputVertex.java | 45 +++++++-
.../nephele/jobgraph/JobOutputVertex.java | 31 +++++
.../jobmanager/ExceptionOutputFormat.java | 53 +++++++++
.../nephele/jobmanager/JobManagerITCase.java | 114 ++++++++++++++++++-
11 files changed, 268 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
index 200ef7c..3089cdb 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
@@ -834,8 +834,7 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
vertex.getConfiguration().setInteger(DataSinkTask.DEGREE_OF_PARALLELISM_KEY, node.getDegreeOfParallelism());
// set user code
- vertex.setOutputFormat((UserCodeWrapper<? extends OutputFormat<?>>)node.getPactContract().getUserCodeWrapper
- ());
+ vertex.setOutputFormat((UserCodeWrapper<? extends OutputFormat<?>>)node.getPactContract().getUserCodeWrapper());
vertex.setOutputFormatParameters(node.getPactContract().getParameters());
return vertex;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
index 7733c71..c4e1d5a 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
@@ -438,6 +438,11 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
}
}
+ /**
+ * Initialization of the distributed file system if it is used.
+ *
+ * @param configuration The task configuration
+ */
@Override
public void initialize(Configuration configuration){
final Path path = this.getOutputFilePath();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
index 3b66902..bdc59e4 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
@@ -81,7 +81,8 @@ public interface OutputFormat<IT> extends Serializable {
void close() throws IOException;
/**
- * Method which is called on the master node prior to execution. It can be used to set up the output format.
+ * Method which is called on the JobManager node prior to execution. It can be used to set up output format
+ * related tasks.
*
* @param configuration The task configuration
*/
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
index 0271b59..451577f 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
@@ -407,10 +407,10 @@ public class Configuration implements IOReadableWritable {
}
/**
- * Checks whether there is an entry with key
+ * Checks whether there is an entry with the specified key
*
* @param key key of entry
- * @return true if entry with key is stored in the configuration, otherwise false
+ * @return true if the key is stored, false otherwise
*/
public boolean containsKey(String key){
synchronized (this.confData){
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
index 93e0a25..1c4a820 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
@@ -465,13 +465,6 @@ public class ExecutionGraph implements ExecutionListener {
throw new GraphConversionException(StringUtils.stringifyException(t));
}
- // Run the configuration check the user has provided for the vertex
- try {
- jobVertex.checkConfiguration(groupVertex.getEnvironment().getInvokable());
- } catch (IllegalConfigurationException e) {
- throw new GraphConversionException(StringUtils.stringifyException(e));
- }
-
// Register input and output vertices separately
if (jobVertex instanceof AbstractJobInputVertex) {
@@ -1043,6 +1036,8 @@ public class ExecutionGraph implements ExecutionListener {
if (eg.jobHasFailedOrCanceledStatus()) {
return InternalJobStatus.CANCELED;
}
+ }else if(latestStateChange == ExecutionState.FAILED){
+ return InternalJobStatus.FAILING;
}
break;
case SCHEDULED:
@@ -1052,6 +1047,8 @@ public class ExecutionGraph implements ExecutionListener {
if (eg.jobHasFailedOrCanceledStatus()) {
return InternalJobStatus.CANCELED;
}
+ }else if(latestStateChange == ExecutionState.FAILED){
+ return InternalJobStatus.FAILING;
}
break;
case RUNNING:
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
index 22b4d7c..e4d3b9d 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
@@ -39,6 +39,19 @@ public abstract class AbstractJobInputVertex extends AbstractJobVertex {
jobGraph.addVertex(this);
}
+ /**
+ * Returns the input split type of the input splits created by this input vertex
+ *
+ * @return input split type class
+ */
public abstract Class<? extends InputSplit> getInputSplitType();
+
+ /**
+ * Computes the input splits created by this input vertex
+ *
+ * @param minNumSplits Number of minimal input splits
+ * @return Array of input splits
+ * @throws Exception
+ */
public abstract InputSplit[] getInputSplits(int minNumSplits) throws Exception;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
index 7cec46a..cdadd3c 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
@@ -573,18 +573,6 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
}
/**
- * Performs check whether the vertex has been properly configured
- *
- * @param configuration
- * configuration of this vertex
- * @throws IllegalConfigurationException
- * thrown if the respective tasks is not configured properly
- */
- public void checkConfiguration(final Configuration configuration) throws IllegalConfigurationException {
- //default configuration check
- }
-
- /**
* Returns the invokable class which represents the task of this vertex
*
* @return the invokable class, <code>null</code> if it is not set
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
index 9e5f6c7..29f98d9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
@@ -27,6 +27,11 @@ import java.io.DataInput;
import java.io.IOException;
public class JobInputVertex extends AbstractJobInputVertex {
+ /**
+ * Input format associated to this JobInputVertex. It is either directly set or reconstructed from the task
+ * configuration. Every job input vertex requires an input format to compute the input splits and the input split
+ * type.
+ */
private volatile InputFormat<?, ? extends InputSplit> inputFormat = null;
/**
@@ -85,6 +90,11 @@ public class JobInputVertex extends AbstractJobInputVertex {
return (Class<? extends AbstractInputTask<?>>) this.invokableClass;
}
+ /**
+ * Sets the input format and writes it to the task configuration. It extracts it from the UserCodeWrapper.
+ *
+ * @param inputFormatWrapper Wrapped input format
+ */
public void setInputFormat(UserCodeWrapper<? extends InputFormat<?, ? extends InputSplit>> inputFormatWrapper) {
TaskConfig config = new TaskConfig(this.getConfiguration());
config.setStubWrapper(inputFormatWrapper);
@@ -92,6 +102,11 @@ public class JobInputVertex extends AbstractJobInputVertex {
inputFormat = inputFormatWrapper.getUserCodeObject();
}
+ /**
+ * Sets the input format and writes it to the task configuration.
+ *
+ * @param inputFormat Input format
+ */
public void setInputFormat(InputFormat<?, ? extends InputSplit> inputFormat) {
this.inputFormat = inputFormat;
@@ -101,6 +116,11 @@ public class JobInputVertex extends AbstractJobInputVertex {
config.setStubWrapper(wrapper);
}
+ /**
+ * Sets the input format parameters.
+ *
+ * @param inputFormatParameters Input format parameters
+ */
public void setInputFormatParameters(Configuration inputFormatParameters){
TaskConfig config = new TaskConfig(this.getConfiguration());
config.setStubParameters(inputFormatParameters);
@@ -112,12 +132,23 @@ public class JobInputVertex extends AbstractJobInputVertex {
inputFormat.configure(inputFormatParameters);
}
+ /**
+ * Sets the output serializer for the task associated to this vertex.
+ *
+ * @param factory Type serializer factory
+ */
public void setOutputSerializer(TypeSerializerFactory<?> factory){
TaskConfig config = new TaskConfig(this.getConfiguration());
config.setOutputSerializer(factory);
}
-
+ /**
+ * Deserializes the input format from the deserialized task configuration. It then configures the input format by
+ * calling the configure method with the current configuration.
+ *
+ * @param input
+ * @throws IOException
+ */
@Override
public void read(final DataInput input) throws IOException{
super.read(input);
@@ -143,6 +174,11 @@ public class JobInputVertex extends AbstractJobInputVertex {
inputFormat.configure(taskConfig.getStubParameters());
}
+ /**
+ * Gets the input split type class
+ *
+ * @return Input split type class
+ */
@Override
public Class<? extends InputSplit> getInputSplitType() {
if(inputFormat == null){
@@ -152,6 +188,13 @@ public class JobInputVertex extends AbstractJobInputVertex {
return inputFormat.getInputSplitType();
}
+ /**
+ * Gets the input splits from the input format.
+ *
+ * @param minNumSplits Number of minimal input splits
+ * @return Array of input splits
+ * @throws IOException
+ */
@Override
public InputSplit[] getInputSplits(int minNumSplits) throws IOException {
if(inputFormat == null){
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
index 154e639..cf937a0 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
@@ -31,6 +31,9 @@ import java.io.IOException;
*
*/
public class JobOutputVertex extends AbstractJobOutputVertex {
+ /**
+ * Contains the output format associated to this output vertex. It can be <pre>null</pre>.
+ */
private volatile OutputFormat<?> outputFormat = null;
/**
@@ -89,12 +92,22 @@ public class JobOutputVertex extends AbstractJobOutputVertex {
return (Class<? extends AbstractOutputTask>) this.invokableClass;
}
+ /**
+ * Sets the output format and writes it to the task configuration.
+ *
+ * @param outputFormatWrapper Wrapped output format
+ */
public void setOutputFormat(UserCodeWrapper<? extends OutputFormat<?>> outputFormatWrapper){
TaskConfig config = new TaskConfig(this.getConfiguration());
config.setStubWrapper(outputFormatWrapper);
outputFormat = outputFormatWrapper.getUserCodeObject();
}
+ /**
+ * Sets the output format and writes it to the task configuration.
+ *
+ * @param outputFormat Output format
+ */
public void setOutputFormat(OutputFormat<?> outputFormat){
this.outputFormat = outputFormat;
UserCodeWrapper<? extends OutputFormat<?>> wrapper = new UserCodeObjectWrapper<OutputFormat<?>>
@@ -103,6 +116,11 @@ public class JobOutputVertex extends AbstractJobOutputVertex {
config.setStubWrapper(wrapper);
}
+ /**
+ * Sets the output format parameters for the output format by writing it to the task configuration.
+ *
+ * @param parameters Output format parameters
+ */
public void setOutputFormatParameters(Configuration parameters){
TaskConfig config = new TaskConfig(this.getConfiguration());
config.setStubParameters(parameters);
@@ -110,6 +128,14 @@ public class JobOutputVertex extends AbstractJobOutputVertex {
outputFormat.configure(parameters);
}
+ /**
+ * Deserializes the output format from the deserialized configuration if it contains an output format. The output
+ * format is always stored in the stub wrapper. If the task configuration contains an output format,
+ * then it is configured after deserialization.
+ *
+ * @param input
+ * @throws IOException
+ */
@Override
public void read(final DataInput input) throws IOException{
super.read(input);
@@ -133,5 +159,10 @@ public class JobOutputVertex extends AbstractJobOutputVertex {
}
}
+ /**
+ * Returns the output format. It can also be <pre>null</pre>.
+ *
+ * @return output format or <pre>null</pre>
+ */
public OutputFormat<?> getOutputFormat() { return outputFormat; }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java
new file mode 100644
index 0000000..ffc4b42
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java
@@ -0,0 +1,53 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.jobmanager;
+
+import eu.stratosphere.api.common.io.OutputFormat;
+import eu.stratosphere.configuration.Configuration;
+
+import java.io.IOException;
+
+
+public class ExceptionOutputFormat implements OutputFormat<Object> {
+ /**
+ * The message which is used for the test runtime exception.
+ */
+ public static final String RUNTIME_EXCEPTION_MESSAGE = "This is a test runtime exception";
+
+
+ @Override
+ public void configure(Configuration parameters) {
+
+ }
+
+ @Override
+ public void open(int taskNumber, int numTasks) throws IOException {
+
+ }
+
+ @Override
+ public void writeRecord(Object record) throws IOException {
+
+ }
+
+ @Override
+ public void close() throws IOException {
+
+ }
+
+ @Override
+ public void initialize(Configuration configuration) {
+ throw new RuntimeException(RUNTIME_EXCEPTION_MESSAGE);
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/93bc0b9b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
index 89f7428..db2d9af 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
@@ -367,7 +367,7 @@ public class JobManagerITCase {
try {
- inputFile = ServerTestUtils.createInputFile(0);
+ inputFile = ServerTestUtils.createInputFile(100);
outputFile = new File(ServerTestUtils.getTempDir() + File.separator + ServerTestUtils.getRandomFilename());
jarFile = ServerTestUtils.createJarFile(runtimeExceptionClassName);
@@ -451,6 +451,118 @@ public class JobManagerITCase {
}
/**
+ * Tests the Nephele execution when a runtime exception in the output format occurs.
+ */
+ @Test
+ public void testExecutionWithRuntimeExceptionInOutputFormat() {
+
+ final String runtimeExceptionClassName = RuntimeExceptionTask.class.getSimpleName();
+ File inputFile = null;
+ File outputFile = null;
+ File jarFile = null;
+ JobClient jobClient = null;
+
+ try {
+
+ inputFile = ServerTestUtils.createInputFile(100);
+ outputFile = new File(ServerTestUtils.getTempDir() + File.separator + ServerTestUtils.getRandomFilename());
+ jarFile = ServerTestUtils.createJarFile(runtimeExceptionClassName);
+
+ // Create job graph
+ final JobGraph jg = new JobGraph("Job Graph for Exception Test");
+
+ // input vertex
+ final JobInputVertex i1 = new JobInputVertex("Input 1", jg);
+ i1.setNumberOfSubtasks(1);
+ Class<AbstractInputTask<?>> clazz = (Class<AbstractInputTask<?>>)(Class<?>)DataSourceTask
+ .class;
+ i1.setInputClass(clazz);
+ TextInputFormat inputFormat = new TextInputFormat();
+ inputFormat.setFilePath(new Path(inputFile.toURI()));
+ i1.setInputFormat(inputFormat);
+ i1.setInputFormat(inputFormat);
+ i1.setOutputSerializer(RecordSerializerFactory.get());
+ TaskConfig config= new TaskConfig(i1.getConfiguration());
+ config.addOutputShipStrategy(ShipStrategyType.FORWARD);
+
+ // task vertex 1
+ final JobTaskVertex t1 = new JobTaskVertex("Task with Exception", jg);
+ t1.setTaskClass(ForwardTask.class);
+
+ // output vertex
+ JobOutputVertex o1 = new JobOutputVertex("Output 1", jg);
+ o1.setNumberOfSubtasks(1);
+ o1.setOutputClass(DataSinkTask.class);
+ ExceptionOutputFormat outputFormat = new ExceptionOutputFormat();
+ o1.setOutputFormat(outputFormat);
+ TaskConfig outputConfig = new TaskConfig(o1.getConfiguration());
+ outputConfig.addInputToGroup(0);
+ outputConfig.setInputSerializer(RecordSerializerFactory.get(), 0);
+
+ t1.setVertexToShareInstancesWith(i1);
+ o1.setVertexToShareInstancesWith(i1);
+
+ // connect vertices
+ i1.connectTo(t1, ChannelType.IN_MEMORY);
+ t1.connectTo(o1, ChannelType.IN_MEMORY);
+
+ // add jar
+ jg.addJar(new Path(new File(ServerTestUtils.getTempDir() + File.separator + runtimeExceptionClassName
+ + ".jar").toURI()));
+
+ // Create job client and launch job
+ jobClient = new JobClient(jg, configuration);
+
+ // deactivate logging of expected test exceptions
+ Logger jcLogger = Logger.getLogger(JobClient.class);
+ Level jcLevel = jcLogger.getEffectiveLevel();
+ jcLogger.setLevel(Level.OFF);
+ try {
+ jobClient.submitJobAndWait();
+ } catch (JobExecutionException e) {
+
+ // Check if the correct error message is encapsulated in the exception
+ if (e.getMessage() == null) {
+ fail("JobExecutionException does not contain an error message");
+ }
+ if (!e.getMessage().contains(RuntimeExceptionTask.RUNTIME_EXCEPTION_MESSAGE)) {
+ fail("JobExecutionException does not contain the expected error message, " +
+ "but instead: " + e.getMessage());
+ }
+
+ // Check if the correct error message is encapsulated in the exception
+ return;
+ }
+ finally {
+ jcLogger.setLevel(jcLevel);
+ }
+
+ fail("Expected exception but did not receive it");
+
+ } catch (JobGraphDefinitionException jgde) {
+ fail(jgde.getMessage());
+ } catch (IOException ioe) {
+ fail(ioe.getMessage());
+ } finally {
+
+ // Remove temporary files
+ if (inputFile != null) {
+ inputFile.delete();
+ }
+ if (outputFile != null) {
+ outputFile.delete();
+ }
+ if (jarFile != null) {
+ jarFile.delete();
+ }
+
+ if (jobClient != null) {
+ jobClient.close();
+ }
+ }
+ }
+
+ /**
* Creates a file with a sequence of 0 to <code>limit</code> integer numbers
* and triggers a sample job. The sample reads all the numbers from the input file and pushes them through a
* network, a file, and an in-memory channel. Eventually, the numbers are written back to an output file. The test
[17/22] git commit: Replaced Tarjan's algorithm with a simpler
depth-first traversal cycle detection algorithm. By doing so,
one gets rid of a possible linear time check whether a node is contained in
the stack.
Posted by se...@apache.org.
Replaced Tarjan's algorithm with a simpler depth-first traversal cycle detection algorithm. By doing so, one gets rid of a possible linear time check whether a node is contained in the stack.
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/fba44a94
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/fba44a94
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/fba44a94
Branch: refs/heads/master
Commit: fba44a94b9e3dd7f87c391278f17ae55393dd51b
Parents: 93bc0b9
Author: Till Rohrmann <ti...@gmail.com>
Authored: Wed Apr 9 14:32:55 2014 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:20 2014 +0200
----------------------------------------------------------------------
.../stratosphere/nephele/jobgraph/JobGraph.java | 76 ++++++++------------
1 file changed, 28 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/fba44a94/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
index 41fd907..804a258 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
@@ -424,18 +424,14 @@ public class JobGraph implements IOReadableWritable {
*/
public boolean isAcyclic() {
- // Tarjan's algorithm to detect strongly connected componenent of a graph
final AbstractJobVertex[] reachable = getAllReachableJobVertices();
- final HashMap<AbstractJobVertex, Integer> indexMap = new HashMap<AbstractJobVertex, Integer>();
- final HashMap<AbstractJobVertex, Integer> lowLinkMap = new HashMap<AbstractJobVertex, Integer>();
- final Stack<AbstractJobVertex> stack = new Stack<AbstractJobVertex>();
- final Integer index = Integer.valueOf(0);
- for (int i = 0; i < reachable.length; i++) {
- if (!indexMap.containsKey(reachable[i])) {
- if (!tarjan(reachable[i], index, indexMap, lowLinkMap, stack)) {
- return false;
- }
+ final HashSet<JobVertexID> temporarilyMarked = new HashSet<JobVertexID>();
+ final HashSet<JobVertexID> permanentlyMarked = new HashSet<JobVertexID>();
+
+ for(int i = 0; i < reachable.length; i++){
+ if(detectCycle(reachable[i], temporarilyMarked, permanentlyMarked)){
+ return false;
}
}
@@ -443,51 +439,35 @@ public class JobGraph implements IOReadableWritable {
}
/**
- * Auxiliary method implementing Tarjan's algorithm for strongly-connected components to determine whether the job
- * graph is acyclic.
+ * Auxiliary method for cycle detection. Performs a depth-first traversal with vertex markings to detect a cycle.
+ * If a node with a temporary marking is found, then there is a cycle. Once all children of a vertex have been
+ * traversed the parent node cannot be part of another cycle and is thus permanently marked.
+ *
+ * @param jv current job vertex to check
+ * @param temporarilyMarked set of temporarily marked nodes
+ * @param permanentlyMarked set of permanently marked nodes
+ * @return <code>true</code> if there is a cycle, <code>false</code> otherwise
*/
- private boolean tarjan(final AbstractJobVertex jv, Integer index,
- final HashMap<AbstractJobVertex, Integer> indexMap, final HashMap<AbstractJobVertex, Integer> lowLinkMap,
- final Stack<AbstractJobVertex> stack) {
-
- indexMap.put(jv, Integer.valueOf(index));
- lowLinkMap.put(jv, Integer.valueOf(index));
- index = Integer.valueOf(index.intValue() + 1);
- stack.push(jv);
-
- for (int i = 0; i < jv.getNumberOfForwardConnections(); i++) {
+ private boolean detectCycle(final AbstractJobVertex jv, final HashSet<JobVertexID> temporarilyMarked,
+ final HashSet<JobVertexID> permanentlyMarked){
+ JobVertexID vertexID = jv.getID();
- final AbstractJobVertex jv2 = jv.getForwardConnection(i).getConnectedVertex();
- if (!indexMap.containsKey(jv2) || stack.contains(jv2)) {
- if (!indexMap.containsKey(jv2)) {
- if (!tarjan(jv2, index, indexMap, lowLinkMap, stack)) {
- return false;
- }
- }
- if (lowLinkMap.get(jv) > lowLinkMap.get(jv2)) {
- lowLinkMap.put(jv, Integer.valueOf(lowLinkMap.get(jv2)));
- }
- }
- }
-
- if (lowLinkMap.get(jv).equals(indexMap.get(jv))) {
+ if(permanentlyMarked.contains(vertexID)){
+ return false;
+ }else if(temporarilyMarked.contains(vertexID)){
+ return true;
+ }else{
+ temporarilyMarked.add(vertexID);
- int count = 0;
- while (stack.size() > 0) {
- final AbstractJobVertex jv2 = stack.pop();
- if (jv == jv2) {
- break;
+ for(int i = 0; i < jv.getNumberOfForwardConnections(); i++){
+ if(detectCycle(jv.getForwardConnection(i).getConnectedVertex(), temporarilyMarked, permanentlyMarked)){
+ return true;
}
-
- count++;
}
- if (count > 0) {
- return false;
- }
+ permanentlyMarked.add(vertexID);
+ return false;
}
-
- return true;
}
/**
[06/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/RecoveryLogic.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/RecoveryLogic.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/RecoveryLogic.java
deleted file mode 100644
index e369613..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/RecoveryLogic.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-package eu.stratosphere.nephele.jobmanager.scheduler;
-
-import java.io.IOException;
-import java.util.ArrayDeque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-
-import eu.stratosphere.nephele.taskmanager.AbstractTaskResult;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import eu.stratosphere.nephele.execution.ExecutionState;
-import eu.stratosphere.nephele.executiongraph.ExecutionEdge;
-import eu.stratosphere.nephele.executiongraph.ExecutionGate;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.DummyInstance;
-import eu.stratosphere.runtime.io.channels.ChannelID;
-import eu.stratosphere.nephele.taskmanager.TaskCancelResult;
-import eu.stratosphere.nephele.util.SerializableHashSet;
-import eu.stratosphere.util.StringUtils;
-
-public final class RecoveryLogic {
-
- /**
- * The logger to report information and problems.
- */
- private static final Log LOG = LogFactory.getLog(RecoveryLogic.class);
-
- /**
- * Private constructor so class cannot be instantiated.
- */
- private RecoveryLogic() {
- }
-
- public static boolean recover(final ExecutionVertex failedVertex,
- final Map<ExecutionVertexID, ExecutionVertex> verticesToBeRestarted,
- final Set<ExecutionVertex> assignedVertices) {
-
- // Perform initial sanity check
- if (failedVertex.getExecutionState() != ExecutionState.FAILED) {
- LOG.error("Vertex " + failedVertex + " is requested to be recovered, but is not failed");
- return false;
- }
-
- final ExecutionGraph eg = failedVertex.getExecutionGraph();
- synchronized (eg) {
-
- LOG.info("Starting recovery for failed vertex " + failedVertex);
-
- final Set<ExecutionVertex> verticesToBeCanceled = new HashSet<ExecutionVertex>();
-
- findVerticesToRestart(failedVertex, verticesToBeCanceled);
-
- // Restart all predecessors without checkpoint
- final Iterator<ExecutionVertex> cancelIterator = verticesToBeCanceled.iterator();
- while (cancelIterator.hasNext()) {
-
- final ExecutionVertex vertex = cancelIterator.next();
-
- if (vertex.compareAndUpdateExecutionState(ExecutionState.FINISHED, getStateToUpdate(vertex))) {
- LOG.info("Vertex " + vertex + " has already finished and will not be canceled");
- if (vertex.getExecutionState() == ExecutionState.ASSIGNED) {
- assignedVertices.add(vertex);
- }
- continue;
- }
-
- LOG.info(vertex + " is canceled by recovery logic");
- verticesToBeRestarted.put(vertex.getID(), vertex);
- final TaskCancelResult cancelResult = vertex.cancelTask();
-
- if (cancelResult.getReturnCode() != AbstractTaskResult.ReturnCode.SUCCESS
- && cancelResult.getReturnCode() != AbstractTaskResult.ReturnCode.TASK_NOT_FOUND) {
-
- verticesToBeRestarted.remove(vertex.getID());
- LOG.error("Unable to cancel vertex" + cancelResult.getDescription());
- return false;
- }
- }
-
- LOG.info("Starting cache invalidation");
-
- // Invalidate the lookup caches
- if (!invalidateReceiverLookupCaches(failedVertex, verticesToBeCanceled)) {
- return false;
- }
-
- LOG.info("Cache invalidation complete");
-
- // Restart failed vertex
- failedVertex.updateExecutionState(getStateToUpdate(failedVertex));
- if (failedVertex.getExecutionState() == ExecutionState.ASSIGNED) {
- assignedVertices.add(failedVertex);
- }
- }
-
- return true;
- }
-
- static boolean hasInstanceAssigned(final ExecutionVertex vertex) {
-
- return !(vertex.getAllocatedResource().getInstance() instanceof DummyInstance);
- }
-
- private static ExecutionState getStateToUpdate(final ExecutionVertex vertex) {
-
- if (hasInstanceAssigned(vertex)) {
- return ExecutionState.ASSIGNED;
- }
-
- return ExecutionState.CREATED;
- }
-
- private static void findVerticesToRestart(final ExecutionVertex failedVertex,
- final Set<ExecutionVertex> verticesToBeCanceled) {
-
- final Queue<ExecutionVertex> verticesToTest = new ArrayDeque<ExecutionVertex>();
- final Set<ExecutionVertex> visited = new HashSet<ExecutionVertex>();
- verticesToTest.add(failedVertex);
-
- while (!verticesToTest.isEmpty()) {
-
- final ExecutionVertex vertex = verticesToTest.poll();
-
- // Predecessors must be either checkpoints or need to be restarted, too
- for (int j = 0; j < vertex.getNumberOfPredecessors(); j++) {
- final ExecutionVertex predecessor = vertex.getPredecessor(j);
-
- if (hasInstanceAssigned(predecessor)) {
- verticesToBeCanceled.add(predecessor);
- }
-
- if (!visited.contains(predecessor)) {
- verticesToTest.add(predecessor);
- }
- }
- visited.add(vertex);
- }
- }
-
- private static final boolean invalidateReceiverLookupCaches(final ExecutionVertex failedVertex,
- final Set<ExecutionVertex> verticesToBeCanceled) {
-
- final Map<AbstractInstance, Set<ChannelID>> entriesToInvalidate = new HashMap<AbstractInstance, Set<ChannelID>>();
-
- collectCacheEntriesToInvalidate(failedVertex, entriesToInvalidate);
- for (final Iterator<ExecutionVertex> it = verticesToBeCanceled.iterator(); it.hasNext();) {
- collectCacheEntriesToInvalidate(it.next(), entriesToInvalidate);
- }
-
- final Iterator<Map.Entry<AbstractInstance, Set<ChannelID>>> it = entriesToInvalidate.entrySet().iterator();
-
- while (it.hasNext()) {
-
- final Map.Entry<AbstractInstance, Set<ChannelID>> entry = it.next();
- final AbstractInstance instance = entry.getKey();
-
- try {
- instance.invalidateLookupCacheEntries(entry.getValue());
- } catch (IOException ioe) {
- LOG.error(StringUtils.stringifyException(ioe));
- return false;
- }
- }
-
- return true;
- }
-
- private static void collectCacheEntriesToInvalidate(final ExecutionVertex vertex,
- final Map<AbstractInstance, Set<ChannelID>> entriesToInvalidate) {
-
- final int numberOfOutputGates = vertex.getNumberOfOutputGates();
- for (int i = 0; i < numberOfOutputGates; ++i) {
-
- final ExecutionGate outputGate = vertex.getOutputGate(i);
- for (int j = 0; j < outputGate.getNumberOfEdges(); ++j) {
-
- final ExecutionEdge outputChannel = outputGate.getEdge(j);
-
- final ExecutionVertex connectedVertex = outputChannel.getInputGate().getVertex();
- if (connectedVertex == null) {
- LOG.error("Connected vertex is null");
- continue;
- }
-
- final AbstractInstance instance = connectedVertex.getAllocatedResource().getInstance();
- if (instance instanceof DummyInstance) {
- continue;
- }
-
- Set<ChannelID> channelIDs = entriesToInvalidate.get(instance);
- if (channelIDs == null) {
- channelIDs = new SerializableHashSet<ChannelID>();
- entriesToInvalidate.put(instance, channelIDs);
- }
-
- channelIDs.add(outputChannel.getInputChannelID());
- }
- }
-
- for (int i = 0; i < vertex.getNumberOfInputGates(); ++i) {
-
- final ExecutionGate inputGate = vertex.getInputGate(i);
- for (int j = 0; j < inputGate.getNumberOfEdges(); ++j) {
-
- final ExecutionEdge inputChannel = inputGate.getEdge(j);
-
- final ExecutionVertex connectedVertex = inputChannel.getOutputGate().getVertex();
- if (connectedVertex == null) {
- LOG.error("Connected vertex is null");
- continue;
- }
-
- final AbstractInstance instance = connectedVertex.getAllocatedResource().getInstance();
- if (instance instanceof DummyInstance) {
- continue;
- }
-
- Set<ChannelID> channelIDs = entriesToInvalidate.get(instance);
- if (channelIDs == null) {
- channelIDs = new SerializableHashSet<ChannelID>();
- entriesToInvalidate.put(instance, channelIDs);
- }
-
- channelIDs.add(inputChannel.getOutputChannelID());
- }
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalExecutionListener.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalExecutionListener.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalExecutionListener.java
deleted file mode 100644
index 9ae5635..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalExecutionListener.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager.scheduler.local;
-
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.jobmanager.scheduler.AbstractExecutionListener;
-
-/**
- * This is a wrapper class for the {@link LocalScheduler} to receive
- * notifications about state changes of vertices belonging
- * to scheduled jobs.
- * <p>
- * This class is thread-safe.
- *
- */
-public class LocalExecutionListener extends AbstractExecutionListener {
-
- public LocalExecutionListener(final LocalScheduler scheduler, final ExecutionVertex executionVertex) {
- super(scheduler, executionVertex);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalScheduler.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalScheduler.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalScheduler.java
deleted file mode 100644
index b731965..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/local/LocalScheduler.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager.scheduler.local;
-
-import java.util.ArrayDeque;
-import java.util.Deque;
-import java.util.Iterator;
-import java.util.Map;
-
-import eu.stratosphere.nephele.execution.ExecutionState;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraphIterator;
-import eu.stratosphere.nephele.executiongraph.ExecutionStage;
-import eu.stratosphere.nephele.executiongraph.ExecutionStageListener;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
-import eu.stratosphere.nephele.executiongraph.JobStatusListener;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.nephele.jobmanager.DeploymentManager;
-import eu.stratosphere.nephele.jobmanager.scheduler.AbstractScheduler;
-import eu.stratosphere.nephele.jobmanager.scheduler.SchedulingException;
-import eu.stratosphere.util.StringUtils;
-
-public class LocalScheduler extends AbstractScheduler implements JobStatusListener, ExecutionStageListener {
-
- /**
- * The job queue of the scheduler
- */
- private Deque<ExecutionGraph> jobQueue = new ArrayDeque<ExecutionGraph>();
-
- /**
- * Constructs a new local scheduler.
- *
- * @param deploymentManager
- * the deployment manager assigned to this scheduler
- * @param instanceManager
- * the instance manager to be used with this scheduler
- */
- public LocalScheduler(final DeploymentManager deploymentManager, final InstanceManager instanceManager) {
- super(deploymentManager, instanceManager);
- }
-
- void removeJobFromSchedule(final ExecutionGraph executionGraphToRemove) {
-
- boolean removedFromQueue = false;
-
- synchronized (this.jobQueue) {
-
- final Iterator<ExecutionGraph> it = this.jobQueue.iterator();
- while (it.hasNext()) {
-
- final ExecutionGraph executionGraph = it.next();
- // Field jobID of executionGraph is immutable, so no synchronization needed
- if (executionGraph.getJobID().equals(executionGraphToRemove.getJobID())) {
- removedFromQueue = true;
- it.remove();
- break;
- }
-
- }
- }
-
- if (!removedFromQueue) {
- LOG.error("Cannot find job " + executionGraphToRemove.getJobName() + " ("
- + executionGraphToRemove.getJobID() + ") to remove");
- }
-
- // TODO: Remove vertices from restart map
- }
-
-
- @Override
- public void schedulJob(final ExecutionGraph executionGraph) throws SchedulingException {
-
- // Get Map of all available Instance types
- final Map<InstanceType, InstanceTypeDescription> availableInstances = getInstanceManager()
- .getMapOfAvailableInstanceTypes();
-
- final Iterator<ExecutionStage> stageIt = executionGraph.iterator();
- while (stageIt.hasNext()) {
-
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
- final ExecutionStage stage = stageIt.next();
- stage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
-
- // Iterator over required Instances
- final Iterator<Map.Entry<InstanceType, Integer>> it = instanceRequestMap.getMinimumIterator();
- while (it.hasNext()) {
-
- final Map.Entry<InstanceType, Integer> entry = it.next();
-
- final InstanceTypeDescription descr = availableInstances.get(entry.getKey());
- if (descr == null) {
- throw new SchedulingException("Unable to schedule job: No instance of type " + entry.getKey()
- + " available");
- }
-
- if (descr.getMaximumNumberOfAvailableInstances() != -1
- && descr.getMaximumNumberOfAvailableInstances() < entry.getValue().intValue()) {
- throw new SchedulingException("Unable to schedule job: " + entry.getValue().intValue()
- + " instances of type " + entry.getKey() + " required, but only "
- + descr.getMaximumNumberOfAvailableInstances() + " are available");
- }
- }
- }
-
- // Subscribe to job status notifications
- executionGraph.registerJobStatusListener(this);
-
- // Set state of each vertex for scheduled
- final ExecutionGraphIterator it2 = new ExecutionGraphIterator(executionGraph, true);
- while (it2.hasNext()) {
-
- final ExecutionVertex vertex = it2.next();
- vertex.registerExecutionListener(new LocalExecutionListener(this, vertex));
- }
-
- // Register the scheduler as an execution stage listener
- executionGraph.registerExecutionStageListener(this);
-
- // Add job to the job queue (important to add job to queue before requesting instances)
- synchronized (this.jobQueue) {
- this.jobQueue.add(executionGraph);
- }
-
- // Request resources for the first stage of the job
-
- final ExecutionStage executionStage = executionGraph.getCurrentExecutionStage();
- try {
- requestInstances(executionStage);
- } catch (InstanceException e) {
- final String exceptionMessage = StringUtils.stringifyException(e);
- LOG.error(exceptionMessage);
- this.jobQueue.remove(executionGraph);
- throw new SchedulingException(exceptionMessage);
- }
- }
-
-
- @Override
- public ExecutionGraph getExecutionGraphByID(final JobID jobID) {
-
- synchronized (this.jobQueue) {
-
- final Iterator<ExecutionGraph> it = this.jobQueue.iterator();
- while (it.hasNext()) {
-
- final ExecutionGraph executionGraph = it.next();
- if (executionGraph.getJobID().equals(jobID)) {
- return executionGraph;
- }
- }
- }
-
- return null;
- }
-
-
- @Override
- public void shutdown() {
-
- synchronized (this.jobQueue) {
- this.jobQueue.clear();
- }
-
- }
-
-
- @Override
- public void jobStatusHasChanged(final ExecutionGraph executionGraph, final InternalJobStatus newJobStatus,
- final String optionalMessage) {
-
- if (newJobStatus == InternalJobStatus.FAILED || newJobStatus == InternalJobStatus.FINISHED
- || newJobStatus == InternalJobStatus.CANCELED) {
- removeJobFromSchedule(executionGraph);
- }
- }
-
-
- @Override
- public void nextExecutionStageEntered(final JobID jobID, final ExecutionStage executionStage) {
-
- // Request new instances if necessary
- try {
- requestInstances(executionStage);
- } catch (InstanceException e) {
- // TODO: Handle this error correctly
- LOG.error(StringUtils.stringifyException(e));
- }
-
- // Deploy the assigned vertices
- deployAssignedInputVertices(executionStage.getExecutionGraph());
-
- // Initialize the replay of the previous stage's checkpoints
- replayCheckpointsFromPreviousStage(executionStage.getExecutionGraph());
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueExecutionListener.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueExecutionListener.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueExecutionListener.java
deleted file mode 100644
index 1d37edc..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueExecutionListener.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager.scheduler.queue;
-
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.jobmanager.scheduler.AbstractExecutionListener;
-
-/**
- * This is a wrapper class for the {@link QueueScheduler} to receive
- * notifications about state changes of vertices belonging
- * to scheduled jobs.
- * <p>
- * This class is thread-safe.
- *
- */
-public final class QueueExecutionListener extends AbstractExecutionListener {
-
- /**
- * Constructs a new queue execution listener.
- *
- * @param scheduler
- * the scheduler this listener is connected with
- * @param executionVertex
- * the execution vertex this listener is created for
- */
- public QueueExecutionListener(final QueueScheduler scheduler, final ExecutionVertex executionVertex) {
- super(scheduler, executionVertex);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueScheduler.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueScheduler.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueScheduler.java
deleted file mode 100644
index cd76f04..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueScheduler.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager.scheduler.queue;
-
-import java.util.ArrayDeque;
-import java.util.Deque;
-import java.util.Iterator;
-import java.util.Map;
-
-import eu.stratosphere.nephele.execution.ExecutionState;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraphIterator;
-import eu.stratosphere.nephele.executiongraph.ExecutionStage;
-import eu.stratosphere.nephele.executiongraph.ExecutionStageListener;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
-import eu.stratosphere.nephele.executiongraph.JobStatusListener;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.nephele.jobmanager.DeploymentManager;
-import eu.stratosphere.nephele.jobmanager.scheduler.AbstractScheduler;
-import eu.stratosphere.nephele.jobmanager.scheduler.SchedulingException;
-import eu.stratosphere.util.StringUtils;
-
-/**
- * The queue scheduler mains of queue of all submitted jobs and executes one job at a time.
- *
- */
-public class QueueScheduler extends AbstractScheduler implements JobStatusListener, ExecutionStageListener {
-
- /**
- * The job queue where all submitted jobs go to.
- */
- private Deque<ExecutionGraph> jobQueue = new ArrayDeque<ExecutionGraph>();
-
- /**
- * Constructs a new queue scheduler.
- *
- * @param deploymentManager
- * the deployment manager assigned to this scheduler
- * @param instanceManager
- * the instance manager to be used with this scheduler
- */
- public QueueScheduler(final DeploymentManager deploymentManager, final InstanceManager instanceManager) {
- super(deploymentManager, instanceManager);
- }
-
- /**
- * Removes the job represented by the given {@link ExecutionGraph} from the scheduler.
- *
- * @param executionGraphToRemove
- * the job to be removed
- */
- void removeJobFromSchedule(final ExecutionGraph executionGraphToRemove) {
-
- boolean removedFromQueue = false;
-
- synchronized (this.jobQueue) {
-
- final Iterator<ExecutionGraph> it = this.jobQueue.iterator();
- while (it.hasNext()) {
-
- final ExecutionGraph executionGraph = it.next();
- if (executionGraph.getJobID().equals(executionGraphToRemove.getJobID())) {
- removedFromQueue = true;
- it.remove();
- break;
- }
- }
- }
-
- if (!removedFromQueue) {
- LOG.error("Cannot find job " + executionGraphToRemove.getJobName() + " ("
- + executionGraphToRemove.getJobID() + ") to remove");
- }
- }
-
-
- @Override
- public void schedulJob(final ExecutionGraph executionGraph) throws SchedulingException {
-
- // Get Map of all available Instance types
- final Map<InstanceType, InstanceTypeDescription> availableInstances = getInstanceManager()
- .getMapOfAvailableInstanceTypes();
-
- final Iterator<ExecutionStage> stageIt = executionGraph.iterator();
- while (stageIt.hasNext()) {
-
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
- final ExecutionStage stage = stageIt.next();
- stage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
-
- // Iterator over required Instances
- final Iterator<Map.Entry<InstanceType, Integer>> it = instanceRequestMap.getMinimumIterator();
- while (it.hasNext()) {
-
- final Map.Entry<InstanceType, Integer> entry = it.next();
-
- final InstanceTypeDescription descr = availableInstances.get(entry.getKey());
- if (descr == null) {
- throw new SchedulingException("Unable to schedule job: No instance of type " + entry.getKey()
- + " available");
- }
-
- if (descr.getMaximumNumberOfAvailableInstances() != -1
- && descr.getMaximumNumberOfAvailableInstances() < entry.getValue().intValue()) {
- throw new SchedulingException("Unable to schedule job: " + entry.getValue().intValue()
- + " instances of type " + entry.getKey() + " required, but only "
- + descr.getMaximumNumberOfAvailableInstances() + " are available");
- }
- }
- }
-
- // Subscribe to job status notifications
- executionGraph.registerJobStatusListener(this);
-
- // Register execution listener for each vertex
- final ExecutionGraphIterator it2 = new ExecutionGraphIterator(executionGraph, true);
- while (it2.hasNext()) {
-
- final ExecutionVertex vertex = it2.next();
- vertex.registerExecutionListener(new QueueExecutionListener(this, vertex));
- }
-
- // Register the scheduler as an execution stage listener
- executionGraph.registerExecutionStageListener(this);
-
- // Add job to the job queue (important to add job to queue before requesting instances)
- synchronized (this.jobQueue) {
- this.jobQueue.add(executionGraph);
- }
-
- // Request resources for the first stage of the job
-
- final ExecutionStage executionStage = executionGraph.getCurrentExecutionStage();
- try {
- requestInstances(executionStage);
- } catch (InstanceException e) {
- final String exceptionMessage = StringUtils.stringifyException(e);
- LOG.error(exceptionMessage);
- this.jobQueue.remove(executionGraph);
- throw new SchedulingException(exceptionMessage);
- }
- }
-
-
- @Override
- public ExecutionGraph getExecutionGraphByID(final JobID jobID) {
-
- synchronized (this.jobQueue) {
-
- final Iterator<ExecutionGraph> it = this.jobQueue.iterator();
- while (it.hasNext()) {
-
- final ExecutionGraph executionGraph = it.next();
- if (executionGraph.getJobID().equals(jobID)) {
- return executionGraph;
- }
- }
- }
-
- return null;
- }
-
-
- @Override
- public void shutdown() {
-
- synchronized (this.jobQueue) {
- this.jobQueue.clear();
- }
-
- }
-
-
- @Override
- public void jobStatusHasChanged(final ExecutionGraph executionGraph, final InternalJobStatus newJobStatus,
- final String optionalMessage) {
-
- if (newJobStatus == InternalJobStatus.FAILED || newJobStatus == InternalJobStatus.FINISHED
- || newJobStatus == InternalJobStatus.CANCELED) {
- removeJobFromSchedule(executionGraph);
- }
- }
-
-
- @Override
- public void nextExecutionStageEntered(final JobID jobID, final ExecutionStage executionStage) {
-
- // Request new instances if necessary
- try {
- requestInstances(executionStage);
- } catch (InstanceException e) {
- // TODO: Handle error correctly
- LOG.error(StringUtils.stringifyException(e));
- }
-
- // Deploy the assigned vertices
- deployAssignedInputVertices(executionStage.getExecutionGraph());
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
index eea78d8..bbef991 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
@@ -37,7 +37,7 @@ import eu.stratosphere.util.StringUtils;
/**
* The input split manager is responsible for serving input splits to {@link AbstractInputTask} objects at runtime.
- * Before passed on to the {@link AbstractScheduler}, an {@link ExecutionGraph} is registered with the input split
+ * Before passed on to the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler}, an {@link ExecutionGraph} is registered with the input split
* manager and all included input vertices of the graph register their generated input splits with the manager. Each
* type of input split can be assigned to a specific {@link InputSplitAssigner} which is loaded by the input split
* manager at runtime.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
index 85df81a..3717fbf 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
@@ -16,6 +16,7 @@ package eu.stratosphere.nephele.jobmanager.splitassigner;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import eu.stratosphere.nephele.instance.Instance;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -23,7 +24,6 @@ import eu.stratosphere.core.io.InputSplit;
import eu.stratosphere.core.io.LocatableInputSplit;
import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.instance.AbstractInstance;
import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.nephele.template.AbstractInvokable;
@@ -115,7 +115,7 @@ public final class LocatableInputSplitAssigner implements InputSplitAssigner {
return null;
}
- final AbstractInstance instance = vertex.getAllocatedResource().getInstance();
+ final Instance instance = vertex.getAllocatedResource().getInstance();
if (instance == null) {
LOG.error("Instance is null, returning random split");
return null;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitList.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitList.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitList.java
index c830a6f..7647fae 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitList.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitList.java
@@ -21,16 +21,16 @@ import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Set;
+import eu.stratosphere.nephele.instance.Instance;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import eu.stratosphere.core.io.LocatableInputSplit;
-import eu.stratosphere.nephele.instance.AbstractInstance;
/**
* The locatable input split list stores the locatable input splits for an input vertex that are still expected to be
* consumed. Besides simply storing the splits, the locatable input split list also computes the distance all
- * {@link AbstractInstance} objects which request an input split and its nearest storage location with respect to the
+ * {@link eu.stratosphere.nephele.instance.Instance} objects which request an input split and its nearest storage location with respect to the
* underlying network topology. That way input splits are always given to consuming vertices in a way that data locality
* is preserved as well as possible.
* <p>
@@ -50,13 +50,13 @@ public final class LocatableInputSplitList {
private Set<LocatableInputSplit> masterSet = new HashSet<LocatableInputSplit>();
/**
- * The map caching the specific file input split lists for each {@link AbstractInstance}.
+ * The map caching the specific file input split lists for each {@link eu.stratosphere.nephele.instance.Instance}.
*/
- private Map<AbstractInstance, Queue<QueueElem>> instanceMap = new HashMap<AbstractInstance, Queue<QueueElem>>();
+ private Map<Instance, Queue<QueueElem>> instanceMap = new HashMap<Instance, Queue<QueueElem>>();
/**
* This is an auxiliary class to store the minimum distance between a file input split's storage locations and an
- * {@link AbstractInstance}.
+ * {@link eu.stratosphere.nephele.instance.Instance}.
*
*/
private final class QueueElem implements Comparable<QueueElem> {
@@ -120,7 +120,7 @@ public final class LocatableInputSplitList {
/**
* Returns the next locatable input split to be consumed by the given instance. The returned input split is selected
* in a
- * way that the distance between the split's storage location and the requesting {@link AbstractInstance} is as
+ * way that the distance between the split's storage location and the requesting {@link eu.stratosphere.nephele.instance.Instance} is as
* short as possible.
*
* @param instance
@@ -128,7 +128,7 @@ public final class LocatableInputSplitList {
* @return the next input split to be consumed by the given instance or <code>null</code> if all input splits have
* already been consumed.
*/
- synchronized LocatableInputSplit getNextInputSplit(final AbstractInstance instance) {
+ synchronized LocatableInputSplit getNextInputSplit(final Instance instance) {
final Queue<QueueElem> instanceSplitList = getInstanceSplitList(instance);
@@ -157,16 +157,16 @@ public final class LocatableInputSplitList {
}
/**
- * Returns a list of locatable input splits specifically ordered for the given {@link AbstractInstance}. When the
+ * Returns a list of locatable input splits specifically ordered for the given {@link eu.stratosphere.nephele.instance.Instance}. When the
* list is initially created, it contains all the unconsumed located input splits at that point in time, ascendingly
* ordered
- * by the minimum distance between the input splits' storage locations and the given {@link AbstractInstance}.
+ * by the minimum distance between the input splits' storage locations and the given {@link eu.stratosphere.nephele.instance.Instance}.
*
* @param instance
* the instance for which the locatable input split list has been computed
* @return the list of file input splits ordered specifically for the given instance
*/
- private Queue<QueueElem> getInstanceSplitList(final AbstractInstance instance) {
+ private Queue<QueueElem> getInstanceSplitList(final Instance instance) {
Queue<QueueElem> instanceSplitList = this.instanceMap.get(instance);
if (instanceSplitList == null) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
index 938fb48..7894334 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
@@ -16,6 +16,7 @@ package eu.stratosphere.nephele.jobmanager.splitassigner.file;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import eu.stratosphere.nephele.instance.Instance;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -23,7 +24,6 @@ import eu.stratosphere.core.fs.FileInputSplit;
import eu.stratosphere.core.io.InputSplit;
import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.instance.AbstractInstance;
import eu.stratosphere.nephele.jobmanager.splitassigner.InputSplitAssigner;
import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.nephele.template.AbstractInvokable;
@@ -117,7 +117,7 @@ public final class FileInputSplitAssigner implements InputSplitAssigner {
return null;
}
- final AbstractInstance instance = vertex.getAllocatedResource().getInstance();
+ final Instance instance = vertex.getAllocatedResource().getInstance();
if (instance == null) {
LOG.error("Instance is null, returning random split");
return null;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitList.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitList.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitList.java
index db84a91..ae9898a 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitList.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitList.java
@@ -21,15 +21,15 @@ import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Set;
+import eu.stratosphere.nephele.instance.Instance;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import eu.stratosphere.core.fs.FileInputSplit;
-import eu.stratosphere.nephele.instance.AbstractInstance;
/**
* The file input split list stores the file input splits for an input vertex that are still expected to be consumed.
- * Besides simply storing the splits, the file input split list also computes the distance all {@link AbstractInstance}
+ * Besides simply storing the splits, the file input split list also computes the distance all {@link eu.stratosphere.nephele.instance.Instance}
* objects which request a input split and its nearest storage location with respect to the underlying network topology.
* That way input splits are always given to consuming vertices in a way that data locality is preserved as well as
* possible.
@@ -50,13 +50,13 @@ public final class FileInputSplitList {
private Set<FileInputSplit> masterSet = new HashSet<FileInputSplit>();
/**
- * The map caching the specific file input split lists for each {@link AbstractInstance}.
+ * The map caching the specific file input split lists for each {@link eu.stratosphere.nephele.instance.Instance}.
*/
- private Map<AbstractInstance, Queue<QueueElem>> instanceMap = new HashMap<AbstractInstance, Queue<QueueElem>>();
+ private Map<Instance, Queue<QueueElem>> instanceMap = new HashMap<Instance, Queue<QueueElem>>();
/**
* This is an auxiliary class to store the minimum distance between a file input split's storage locations and an
- * {@link AbstractInstance}.
+ * {@link eu.stratosphere.nephele.instance.Instance}.
*
*/
private final class QueueElem implements Comparable<QueueElem> {
@@ -119,7 +119,7 @@ public final class FileInputSplitList {
/**
* Returns the next file input split to be consumed by the given instance. The returned input split is selected in a
- * way that the distance between the split's storage location and the requesting {@link AbstractInstance} is as
+ * way that the distance between the split's storage location and the requesting {@link eu.stratosphere.nephele.instance.Instance} is as
* short as possible.
*
* @param instance
@@ -127,7 +127,7 @@ public final class FileInputSplitList {
* @return the next input split to be consumed by the given instance or <code>null</code> if all input splits have
* already been consumed.
*/
- synchronized FileInputSplit getNextInputSplit(final AbstractInstance instance) {
+ synchronized FileInputSplit getNextInputSplit(final Instance instance) {
final Queue<QueueElem> instanceSplitList = getInstanceSplitList(instance);
@@ -156,15 +156,15 @@ public final class FileInputSplitList {
}
/**
- * Returns a list of file input splits specifically ordered for the given {@link AbstractInstance}. When the list is
+ * Returns a list of file input splits specifically ordered for the given {@link eu.stratosphere.nephele.instance.Instance}. When the list is
* initially created, it contains all the unconsumed file input splits at that point in time, ascendingly ordered by
- * the minimum distance between the input splits' storage locations and the given {@link AbstractInstance}.
+ * the minimum distance between the input splits' storage locations and the given {@link eu.stratosphere.nephele.instance.Instance}.
*
* @param instance
* the instance for which the file input split list has been computed
* @return the list of file input splits ordered specifically for the given instance
*/
- private Queue<QueueElem> getInstanceSplitList(final AbstractInstance instance) {
+ private Queue<QueueElem> getInstanceSplitList(final Instance instance) {
Queue<QueueElem> instanceSplitList = this.instanceMap.get(instance);
if (instanceSplitList == null) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementGraph.java
index 374656b..fab720d 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementGraph.java
@@ -445,9 +445,8 @@ public final class ManagementGraph extends ManagementAttachment implements IORea
groupVertexID.read(in);
final ManagementGroupVertex groupVertex = this.getGroupVertexByID(groupVertexID);
final String instanceName = StringRecord.readString(in);
- final String instanceType = StringRecord.readString(in);
final int indexInGroup = in.readInt();
- final ManagementVertex vertex = new ManagementVertex(groupVertex, vertexID, instanceName, instanceType, indexInGroup);
+ final ManagementVertex vertex = new ManagementVertex(groupVertex, vertexID, instanceName, indexInGroup);
vertex.read(in);
}
@@ -523,7 +522,6 @@ public final class ManagementGraph extends ManagementAttachment implements IORea
managementVertex.getID().write(out);
managementVertex.getGroupVertex().getID().write(out);
StringRecord.writeString(out, managementVertex.getInstanceName());
- StringRecord.writeString(out, managementVertex.getInstanceType());
out.writeInt(managementVertex.getIndexInGroup());
managementVertex.write(out);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementVertex.java
index 639b1e9..eaececc 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/managementgraph/ManagementVertex.java
@@ -65,11 +65,6 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
private String instanceName;
/**
- * The type of the instance the vertex represented by this management vertex currently runs on.
- */
- private String instanceType;
-
- /**
* The index of this vertex in the management group vertex it belongs to.
*/
private final int indexInGroup;
@@ -88,19 +83,14 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
* the ID of the new management vertex
* @param instanceName
* the name of the instance the vertex represented by this new management vertex currently runs on
- * @param instanceType
- * the type of the instance the vertex represented by this new management vertex currently runs on
- * @param checkpointState
- * the state of the vertex's checkpoint
* @param indexInGroup
* the index of this vertex in the management group vertex it belongs to
*/
public ManagementVertex(final ManagementGroupVertex groupVertex, final ManagementVertexID id,
- final String instanceName, final String instanceType, final int indexInGroup) {
+ final String instanceName, final int indexInGroup) {
this.groupVertex = groupVertex;
this.id = id;
this.instanceName = instanceName;
- this.instanceType = instanceType;
this.indexInGroup = indexInGroup;
@@ -132,15 +122,6 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
}
/**
- * Returns the type of the instance the vertex represented by this management vertex currently runs on.
- *
- * @return the type of the instance the vertex represented by this management vertex currently runs on
- */
- public String getInstanceType() {
- return this.instanceType;
- }
-
- /**
* Returns the number of input gates this management vertex contains.
*
* @return the number of input gates this management vertex contains
@@ -276,16 +257,6 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
this.instanceName = instanceName;
}
- /**
- * Sets the type of instance this vertex currently runs on.
- *
- * @param instanceType
- * the type of instance this vertex currently runs on
- */
- public void setInstanceType(final String instanceType) {
- this.instanceType = instanceType;
- }
-
public void setOptMessage(final String optMessage) {
this.optMessage = optMessage;
}
@@ -294,7 +265,6 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
return this.optMessage;
}
-
@Override
public void read(final DataInput in) throws IOException {
@@ -314,7 +284,6 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
}
this.instanceName = StringRecord.readString(in);
- this.instanceType = StringRecord.readString(in);
}
@@ -331,7 +300,6 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
out.writeInt(this.outputGates.size());
StringRecord.writeString(out, this.instanceName);
- StringRecord.writeString(out, this.instanceType);
}
@Override
@@ -351,7 +319,6 @@ public final class ManagementVertex extends ManagementAttachment implements IORe
json.append("\"vertexname\": \"" + StringUtils.escapeHtml(this.toString()) + "\",");
json.append("\"vertexstatus\": \"" + this.getExecutionState() + "\",");
json.append("\"vertexinstancename\": \"" + this.getInstanceName() + "\",");
- json.append("\"vertexinstancetype\": \"" + this.getInstanceType() + "\"");
json.append("}");
return json.toString();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/net/NetUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/net/NetUtils.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/net/NetUtils.java
index 35979dd..cb08c3a 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/net/NetUtils.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/net/NetUtils.java
@@ -221,6 +221,7 @@ public class NetUtils {
* @return InputStream for reading from the socket.
* @throws IOException
*/
+ @SuppressWarnings("resource")
public static InputStream getInputStream(Socket socket, long timeout) throws IOException {
return (socket.getChannel() == null) ? socket.getInputStream() : new SocketInputStream(socket, timeout);
}
@@ -266,6 +267,7 @@ public class NetUtils {
* @return OutputStream for writing to the socket.
* @throws IOException
*/
+ @SuppressWarnings("resource")
public static OutputStream getOutputStream(Socket socket, long timeout) throws IOException {
return (socket.getChannel() == null) ? socket.getOutputStream() : new SocketOutputStream(socket, timeout);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/profiling/impl/JobProfilingData.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/profiling/impl/JobProfilingData.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/profiling/impl/JobProfilingData.java
index 7d0c980..81b4134 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/profiling/impl/JobProfilingData.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/profiling/impl/JobProfilingData.java
@@ -23,7 +23,7 @@ import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertexIterator;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.instance.AbstractInstance;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.nephele.instance.DummyInstance;
import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
import eu.stratosphere.nephele.profiling.impl.types.InternalInstanceProfilingData;
@@ -75,7 +75,7 @@ public class JobProfilingData {
public InstanceSummaryProfilingEvent getInstanceSummaryProfilingData(long timestamp) {
- final Set<AbstractInstance> tempSet = new HashSet<AbstractInstance>();
+ final Set<Instance> tempSet = new HashSet<Instance>();
// First determine the number of allocated instances in the current stage
final ExecutionGroupVertexIterator it = new ExecutionGroupVertexIterator(this.executionGraph, true,
this.executionGraph.getIndexOfCurrentExecutionStage());
@@ -84,7 +84,7 @@ public class JobProfilingData {
final ExecutionGroupVertex groupVertex = it.next();
for (int i = 0; i < groupVertex.getCurrentNumberOfGroupMembers(); i++) {
final ExecutionVertex executionVertex = groupVertex.getGroupMember(i);
- final AbstractInstance instance = executionVertex.getAllocatedResource().getInstance();
+ final Instance instance = executionVertex.getAllocatedResource().getInstance();
if(!(instance instanceof DummyInstance)) {
tempSet.add(instance);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/ExtendedManagementProtocol.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/ExtendedManagementProtocol.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/ExtendedManagementProtocol.java
index 59ec15d..c731285 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/ExtendedManagementProtocol.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/ExtendedManagementProtocol.java
@@ -15,13 +15,10 @@ package eu.stratosphere.nephele.protocols;
import java.io.IOException;
import java.util.List;
-import java.util.Map;
import eu.stratosphere.core.io.StringRecord;
import eu.stratosphere.nephele.event.job.AbstractEvent;
import eu.stratosphere.nephele.event.job.RecentJobEvent;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.managementgraph.ManagementGraph;
import eu.stratosphere.nephele.managementgraph.ManagementVertexID;
@@ -104,19 +101,6 @@ public interface ExtendedManagementProtocol extends JobManagementProtocol {
void killInstance(StringRecord instanceName) throws IOException;
/**
- * Returns a map of all instance types which are currently available to Nephele. The map contains a description of
- * the hardware characteristics for each instance type as provided in the configuration file. Moreover, it contains
- * the actual hardware description as reported by task managers running on the individual instances. If available,
- * the map also contains the maximum number instances Nephele can allocate of each instance type (i.e. if no other
- * job occupies instances).
- *
- * @return a list of all instance types available to Nephele
- * @throws IOException
- * thrown if an error occurs while transmitting the list
- */
- Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes() throws IOException;
-
- /**
* Triggers all task managers involved in processing the job with the given job ID to write the utilization of
* their read and write buffers to their log files. This method is primarily for debugging purposes.
*
@@ -126,4 +110,11 @@ public interface ExtendedManagementProtocol extends JobManagementProtocol {
* throws if an error occurs while transmitting the request
*/
void logBufferUtilization(JobID jobID) throws IOException;
+
+ /**
+ * Returns the number of available slots among the registered task managers.
+ * @return number of available slots
+ * @throws IOException thrown if an error occurs during the remote procedure call
+ */
+ int getAvailableSlots() throws IOException;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
index 8cd5e26..5070b51 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
@@ -19,6 +19,8 @@ import eu.stratosphere.core.protocols.VersionedProtocol;
import eu.stratosphere.nephele.instance.HardwareDescription;
import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
import eu.stratosphere.nephele.taskmanager.TaskExecutionState;
+import eu.stratosphere.nephele.taskmanager.transferenvelope.RegisterTaskManagerResult;
+import eu.stratosphere.nephele.types.IntegerRecord;
/**
* The job manager protocol is implemented by the job manager and offers functionality
@@ -33,12 +35,23 @@ public interface JobManagerProtocol extends VersionedProtocol {
*
* @param instanceConnectionInfo
* the information the job manager requires to connect to the instance's task manager
- * @param hardwareDescription
- * a hardware description with details on the instance's compute resources.
* @throws IOException
* thrown if an error occurs during this remote procedure call
*/
- void sendHeartbeat(InstanceConnectionInfo instanceConnectionInfo, HardwareDescription hardwareDescription)
+ void sendHeartbeat(InstanceConnectionInfo instanceConnectionInfo)
+ throws IOException;
+
+ /**
+ * Registers a task manager at the JobManager.
+ *
+ * @param instanceConnectionInfo the information the job manager requires to connect to the instance's task manager
+ * @param hardwareDescription a hardware description with details on the instance's compute resources.
+ * @return whether the task manager was successfully registered
+ *
+ * @throws IOException thrown if an error occurs during the remote procedure call
+ */
+ RegisterTaskManagerResult registerTaskManager(InstanceConnectionInfo instanceConnectionInfo,
+ HardwareDescription hardwareDescription, IntegerRecord numberOfSlots)
throws IOException;
/**
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/iomanager/ChannelAccess.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/iomanager/ChannelAccess.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/iomanager/ChannelAccess.java
index ccbc64a..85432eb 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/iomanager/ChannelAccess.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/iomanager/ChannelAccess.java
@@ -75,6 +75,7 @@ public abstract class ChannelAccess<T, R extends IORequest>
this.requestQueue = requestQueue;
try {
+ @SuppressWarnings("resource")
RandomAccessFile file = new RandomAccessFile(id.getPath(), writeEnabled ? "rw" : "r");
this.fileChannel = file.getChannel();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/MemoryManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/MemoryManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/MemoryManager.java
index a8fe096..8b20c75 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/MemoryManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/MemoryManager.java
@@ -69,16 +69,29 @@ public interface MemoryManager {
* @return The size of the pages handled by the memory manager.
*/
int getPageSize();
+
+ /**
+ * Returns the total size of the memory managed by this memory manager.
+ * @return the total size of the managed memory in bytes
+ */
+ long getMemorySize();
/**
* Computes to how many pages the given number of bytes corresponds. If the given number of bytes is not an
* exact multiple of a page size, the result is rounded down, such that a portion of the memory (smaller
* than the page size) is not included.
*
- * @param numBytes The number of bytes to convert to a page count.
+ * @param fraction the fraction of the total memory per slot
* @return The number of pages to which
*/
- int computeNumberOfPages(long numBytes);
+ int computeNumberOfPages(double fraction);
+
+ /**
+ * Computes the memory size corresponding to the given fraction of the total memory per slot.
+ * @param fraction the fraction of the total memory per slot
+ * @return the memory size in bytes corresponding to the given fraction
+ */
+ long computeMemorySize(double fraction);
/**
* Rounds the given value down to a multiple of the memory manager's page size.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/spi/DefaultMemoryManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/spi/DefaultMemoryManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/spi/DefaultMemoryManager.java
index 8bc7b13..d4a2b36 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/spi/DefaultMemoryManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/services/memorymanager/spi/DefaultMemoryManager.java
@@ -66,6 +66,13 @@ public class DefaultMemoryManager implements MemoryManager {
private boolean isShutDown; // flag whether the close() has already been invoked.
+ /**
+ * Number of slots of the task manager
+ */
+ private final int numberOfSlots;
+
+ private final long memorySize;
+
// ------------------------------------------------------------------------
// Constructors / Destructors
// ------------------------------------------------------------------------
@@ -75,8 +82,8 @@ public class DefaultMemoryManager implements MemoryManager {
*
* @param memorySize The total size of the memory to be managed by this memory manager.
*/
- public DefaultMemoryManager(long memorySize) {
- this(memorySize, DEFAULT_PAGE_SIZE);
+ public DefaultMemoryManager(long memorySize, int numberOfSlots) {
+ this(memorySize, numberOfSlots, DEFAULT_PAGE_SIZE);
}
/**
@@ -85,7 +92,7 @@ public class DefaultMemoryManager implements MemoryManager {
* @param memorySize The total size of the memory to be managed by this memory manager.
* @param pageSize The size of the pages handed out by the memory manager.
*/
- public DefaultMemoryManager(long memorySize, int pageSize) {
+ public DefaultMemoryManager(long memorySize, int numberOfSlots, int pageSize) {
// sanity checks
if (memorySize <= 0) {
throw new IllegalArgumentException("Size of total memory must be positive.");
@@ -97,6 +104,10 @@ public class DefaultMemoryManager implements MemoryManager {
// not a power of two
throw new IllegalArgumentException("The given page size is not a power of two.");
}
+
+ this.memorySize = memorySize;
+
+ this.numberOfSlots = numberOfSlots;
// assign page size and bit utilities
this.pageSize = pageSize;
@@ -348,8 +359,18 @@ public class DefaultMemoryManager implements MemoryManager {
}
@Override
- public int computeNumberOfPages(long numBytes) {
- return getNumPages(numBytes);
+ public long getMemorySize() {
+ return this.memorySize;
+ }
+
+ @Override
+ public int computeNumberOfPages(double fraction) {
+ return getRelativeNumPages(fraction);
+ }
+
+ @Override
+ public long computeMemorySize(double fraction) {
+ return this.pageSize*computeNumberOfPages(fraction);
}
@Override
@@ -371,6 +392,14 @@ public class DefaultMemoryManager implements MemoryManager {
throw new IllegalArgumentException("The given number of bytes correstponds to more than MAX_INT pages.");
}
}
+
+ private final int getRelativeNumPages(double fraction){
+ if(fraction < 0){
+ throw new IllegalArgumentException("The fraction of memory to allocate must not be negative.");
+ }
+
+ return (int)(this.totalNumPages * fraction / this.numberOfSlots);
+ }
// ------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
index ef0f6ab..5966cf9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
@@ -45,6 +45,10 @@ import eu.stratosphere.nephele.ExecutionMode;
import eu.stratosphere.runtime.io.network.LocalConnectionManager;
import eu.stratosphere.runtime.io.network.NetworkConnectionManager;
import eu.stratosphere.runtime.io.network.netty.NettyConnectionManager;
+import eu.stratosphere.nephele.instance.Hardware;
+import eu.stratosphere.nephele.taskmanager.transferenvelope.RegisterTaskManagerResult;
+import eu.stratosphere.nephele.types.IntegerRecord;
+
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@@ -148,7 +152,9 @@ public class TaskManager implements TaskOperationProtocol {
private final IOManager ioManager;
- private static HardwareDescription hardwareDescription = null;
+ private final HardwareDescription hardwareDescription;
+
+ private final int numberOfSlots;
private final Thread heartbeatThread;
@@ -156,10 +162,10 @@ public class TaskManager implements TaskOperationProtocol {
/** Stores whether the task manager has already been shut down. */
private volatile boolean shutdownComplete;
-
+
/**
* Constructs a new task manager, starts its IPC service and attempts to discover the job manager to
- * receive an initial configuration. All parameters are obtained from the
+ * receive an initial configuration. All parameters are obtained from the
* {@link GlobalConfiguration}, which must be loaded prior to instantiating the task manager.
*/
public TaskManager(ExecutionMode executionMode) throws Exception {
@@ -169,30 +175,31 @@ public class TaskManager implements TaskOperationProtocol {
LOG.info("Execution mode: " + executionMode);
// IMPORTANT! At this point, the GlobalConfiguration must have been read!
-
+
final InetSocketAddress jobManagerAddress;
{
LOG.info("Reading location of job manager from configuration");
-
+
final String address = GlobalConfiguration.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, null);
final int port = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT);
-
+
if (address == null) {
throw new Exception("Job manager address not configured in the GlobalConfiguration.");
}
-
+
// Try to convert configured address to {@link InetAddress}
try {
final InetAddress tmpAddress = InetAddress.getByName(address);
jobManagerAddress = new InetSocketAddress(tmpAddress, port);
- } catch (UnknownHostException e) {
+ }
+ catch (UnknownHostException e) {
LOG.fatal("Could not resolve JobManager host name.");
throw new Exception("Could not resolve JobManager host name: " + e.getMessage(), e);
}
-
+
LOG.info("Connecting to JobManager at: " + jobManagerAddress);
}
-
+
// Create RPC connection to the JobManager
try {
this.jobManager = RPC.getProxy(JobManagerProtocol.class, jobManagerAddress, NetUtils.getSocketFactory());
@@ -200,7 +207,7 @@ public class TaskManager implements TaskOperationProtocol {
LOG.fatal("Could not connect to the JobManager: " + e.getMessage(), e);
throw new Exception("Failed to initialize connection to JobManager: " + e.getMessage(), e);
}
-
+
int ipcPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_IPC_PORT_KEY, -1);
int dataPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, -1);
if (ipcPort == -1) {
@@ -209,16 +216,17 @@ public class TaskManager implements TaskOperationProtocol {
if (dataPort == -1) {
dataPort = getAvailablePort();
}
-
+
// Determine our own public facing address and start the server
{
final InetAddress taskManagerAddress;
try {
taskManagerAddress = getTaskManagerAddress(jobManagerAddress);
- } catch (Exception e) {
+ }
+ catch (Exception e) {
throw new RuntimeException("The TaskManager failed to determine its own network address.", e);
}
-
+
this.localInstanceConnectionInfo = new InstanceConnectionInfo(taskManagerAddress, ipcPort, dataPort);
LOG.info("TaskManager connection information:" + this.localInstanceConnectionInfo);
@@ -231,7 +239,7 @@ public class TaskManager implements TaskOperationProtocol {
throw new Exception("Failed to start taskmanager server. " + e.getMessage(), e);
}
}
-
+
// Try to create local stub of the global input split provider
try {
this.globalInputSplitProvider = RPC.getProxy(InputSplitProviderProtocol.class, jobManagerAddress, NetUtils.getSocketFactory());
@@ -258,21 +266,19 @@ public class TaskManager implements TaskOperationProtocol {
// Load profiler if it should be used
if (GlobalConfiguration.getBoolean(ProfilingUtils.ENABLE_PROFILING_KEY, false)) {
-
+
final String profilerClassName = GlobalConfiguration.getString(ProfilingUtils.TASKMANAGER_CLASSNAME_KEY,
- "eu.stratosphere.nephele.profiling.impl.TaskManagerProfilerImpl");
-
+ "eu.stratosphere.nephele.profiling.impl.TaskManagerProfilerImpl");
+
this.profiler = ProfilingUtils.loadTaskManagerProfiler(profilerClassName, jobManagerAddress.getAddress(),
- this.localInstanceConnectionInfo);
-
+ this.localInstanceConnectionInfo);
+
if (this.profiler == null) {
LOG.error("Cannot find class name for the profiler.");
- }
- else {
+ } else {
LOG.info("Profiling of jobs is enabled.");
}
- }
- else {
+ } else {
this.profiler = null;
LOG.info("Profiling of jobs is disabled.");
}
@@ -282,10 +288,11 @@ public class TaskManager implements TaskOperationProtocol {
ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH).split(",|" + File.pathSeparator);
checkTempDirs(tmpDirPaths);
-
+
final int pageSize = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NETWORK_BUFFER_SIZE_KEY,
- ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_BUFFER_SIZE);
+ ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_BUFFER_SIZE);
+ // Initialize network buffer pool
int numBuffers = GlobalConfiguration.getInteger(
ConfigConstants.TASK_MANAGER_NETWORK_NUM_BUFFERS_KEY,
ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_NUM_BUFFERS);
@@ -333,6 +340,8 @@ public class TaskManager implements TaskOperationProtocol {
{
HardwareDescription resources = HardwareDescriptionFactory.extractFromSystem();
+ numberOfSlots = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS,
+ Hardware.getNumberCPUCores());
// Check whether the memory size has been explicitly configured. if so that overrides the default mechanism
// of taking as much as is mentioned in the hardware description
@@ -341,29 +350,30 @@ public class TaskManager implements TaskOperationProtocol {
if (memorySize > 0) {
// manually configured memory size. override the value in the hardware config
resources = HardwareDescriptionFactory.construct(resources.getNumberOfCPUCores(),
- resources.getSizeOfPhysicalMemory(), memorySize * 1024L * 1024L);
+ resources.getSizeOfPhysicalMemory(), memorySize * 1024L * 1024L);
}
this.hardwareDescription = resources;
// Initialize the memory manager
LOG.info("Initializing memory manager with " + (resources.getSizeOfFreeMemory() >>> 20) + " megabytes of memory. " +
"Page size is " + pageSize + " bytes.");
-
+
try {
@SuppressWarnings("unused")
final boolean lazyAllocation = GlobalConfiguration.getBoolean(ConfigConstants.TASK_MANAGER_MEMORY_LAZY_ALLOCATION_KEY,
- ConfigConstants.DEFAULT_TASK_MANAGER_MEMORY_LAZY_ALLOCATION);
-
- this.memoryManager = new DefaultMemoryManager(resources.getSizeOfFreeMemory(), pageSize);
+ ConfigConstants.DEFAULT_TASK_MANAGER_MEMORY_LAZY_ALLOCATION);
+
+ this.memoryManager = new DefaultMemoryManager(resources.getSizeOfFreeMemory(), this.numberOfSlots,
+ pageSize);
} catch (Throwable t) {
LOG.fatal("Unable to initialize memory manager with " + (resources.getSizeOfFreeMemory() >>> 20)
- + " megabytes of memory.", t);
+ + " megabytes of memory.", t);
throw new Exception("Unable to initialize memory manager.", t);
}
}
this.ioManager = new IOManager(tmpDirPaths);
-
+
this.heartbeatThread = new Thread() {
@Override
public void run() {
@@ -510,19 +520,33 @@ public class TaskManager implements TaskOperationProtocol {
ConfigConstants.TASK_MANAGER_HEARTBEAT_INTERVAL_KEY,
ConfigConstants.DEFAULT_TASK_MANAGER_HEARTBEAT_INTERVAL);
- while (!shutdownStarted.get()) {
- // send heart beat
- try {
- LOG.debug("heartbeat");
- this.jobManager.sendHeartbeat(this.localInstanceConnectionInfo, this.hardwareDescription);
- } catch (IOException e) {
- if (shutdownStarted.get()) {
+ try {
+ while(!shutdownStarted.get()){
+ RegisterTaskManagerResult result = this.jobManager.registerTaskManager(this
+ .localInstanceConnectionInfo,this.hardwareDescription,
+ new IntegerRecord(this.numberOfSlots));
+
+ if(result.getReturnCode() == RegisterTaskManagerResult.ReturnCode.SUCCESS){
break;
- } else {
- LOG.error("Sending the heart beat caused an exception: " + e.getMessage(), e);
+ }
+
+ try{
+ Thread.sleep(50);
+ }catch(InterruptedException e){
+ if (!shutdownStarted.get()) {
+ LOG.error("TaskManager register task manager loop was interrupted without shutdown.");
+ }
}
}
-
+
+ } catch (IOException e) {
+ if(!shutdownStarted.get()){
+ LOG.error("Registering task manager caused an exception: " + e.getMessage(), e);
+ }
+ return;
+ }
+
+ while (!shutdownStarted.get()) {
// sleep until the next heart beat
try {
Thread.sleep(interval);
@@ -532,9 +556,22 @@ public class TaskManager implements TaskOperationProtocol {
LOG.error("TaskManager heart beat loop was interrupted without shutdown.");
}
}
+
+ // send heart beat
+ try {
+ LOG.debug("heartbeat");
+ this.jobManager.sendHeartbeat(this.localInstanceConnectionInfo);
+ } catch (IOException e) {
+ if (shutdownStarted.get()) {
+ break;
+ } else {
+ LOG.error("Sending the heart beat caused an exception: " + e.getMessage(), e);
+ }
+ }
}
}
+
/**
* The states of address detection mechanism.
* There is only a state transition if the current state failed to determine the address.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/transferenvelope/RegisterTaskManagerResult.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/transferenvelope/RegisterTaskManagerResult.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/transferenvelope/RegisterTaskManagerResult.java
new file mode 100644
index 0000000..b396edd
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/transferenvelope/RegisterTaskManagerResult.java
@@ -0,0 +1,50 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.taskmanager.transferenvelope;
+
+import eu.stratosphere.core.io.IOReadableWritable;
+import eu.stratosphere.nephele.util.EnumUtils;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+public class RegisterTaskManagerResult implements IOReadableWritable {
+ public enum ReturnCode{
+ SUCCESS, FAILURE
+ };
+
+ public RegisterTaskManagerResult(){
+ this.returnCode = ReturnCode.SUCCESS;
+ }
+
+ public RegisterTaskManagerResult(ReturnCode returnCode){
+ this.returnCode = returnCode;
+ }
+
+ private ReturnCode returnCode;
+
+ public ReturnCode getReturnCode() { return this.returnCode; }
+
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ EnumUtils.writeEnum(out, this.returnCode);
+ }
+
+ @Override
+ public void read(DataInput in) throws IOException {
+ this.returnCode = EnumUtils.readEnum(in, ReturnCode.class);
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/topology/NetworkNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/topology/NetworkNode.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/topology/NetworkNode.java
index 09df691..9f6542b 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/topology/NetworkNode.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/topology/NetworkNode.java
@@ -33,8 +33,6 @@ public class NetworkNode implements IOReadableWritable {
private final List<NetworkNode> childNodes = new ArrayList<NetworkNode>();
- private Object attachment;
-
protected NetworkNode(final String name, final NetworkNode parentNode, final NetworkTopology networkTopology) {
this.name = name;
this.parentNode = parentNode;
@@ -119,14 +117,6 @@ public class NetworkNode implements IOReadableWritable {
return this.childNodes.size();
}
- public void setAttachment(final Object attachment) {
- this.attachment = attachment;
- }
-
- public Object getAttachment() {
- return this.attachment;
- }
-
public NetworkNode getChildNode(final int index) {
if (index < this.childNodes.size()) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/util/IOUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/util/IOUtils.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/util/IOUtils.java
index 0ca490b..554bac5 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/util/IOUtils.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/util/IOUtils.java
@@ -56,6 +56,7 @@ public final class IOUtils {
public static void copyBytes(final InputStream in, final OutputStream out, final int buffSize, final boolean close)
throws IOException {
+ @SuppressWarnings("resource")
final PrintStream ps = out instanceof PrintStream ? (PrintStream) out : null;
final byte[] buf = new byte[buffSize];
try {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/cache/FileCache.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/cache/FileCache.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/cache/FileCache.java
index e4f0a4b..fe63ebe 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/cache/FileCache.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/cache/FileCache.java
@@ -14,7 +14,6 @@
package eu.stratosphere.pact.runtime.cache;
import eu.stratosphere.api.common.cache.DistributedCache;
-
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
@@ -138,14 +137,12 @@ public class FileCache {
* Asynchronous file copy process
*/
private class CopyProcess implements Callable<Path> {
+
private JobID jobID;
- @SuppressWarnings("unused")
- private String name;
private String filePath;
private Boolean executable;
public CopyProcess(String name, DistributedCacheEntry e, JobID jobID) {
- this.name = name;
this.filePath = e.filePath;
this.executable = e.isExecutable;
this.jobID = jobID;
@@ -168,15 +165,13 @@ public class FileCache {
* If no task is using this file after 5 seconds, clear it.
*/
private class DeleteProcess implements Runnable {
+
private String name;
- @SuppressWarnings("unused")
- private String filePath;
private JobID jobID;
private int oldCount;
public DeleteProcess(String name, DistributedCacheEntry e, JobID jobID, int c) {
this.name = name;
- this.filePath = e.filePath;
this.jobID = jobID;
this.oldCount = c;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstHashMatchIterator.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstHashMatchIterator.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstHashMatchIterator.java
index ddfa446..a060d28 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstHashMatchIterator.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstHashMatchIterator.java
@@ -60,7 +60,7 @@ public class BuildFirstHashMatchIterator<V1, V2, O> implements JoinTaskIterator<
TypeSerializer<V1> serializer1, TypeComparator<V1> comparator1,
TypeSerializer<V2> serializer2, TypeComparator<V2> comparator2,
TypePairComparator<V2, V1> pairComparator,
- MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, long totalMemory)
+ MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, double memoryFraction)
throws MemoryAllocationException
{
this.memManager = memManager;
@@ -73,7 +73,7 @@ public class BuildFirstHashMatchIterator<V1, V2, O> implements JoinTaskIterator<
this.probeCopy = serializer2.createInstance();
this.hashJoin = getHashJoin(serializer1, comparator1, serializer2, comparator2, pairComparator,
- memManager, ioManager, ownerTask, totalMemory);
+ memManager, ioManager, ownerTask, memoryFraction);
}
// --------------------------------------------------------------------------------------------
@@ -152,10 +152,10 @@ public class BuildFirstHashMatchIterator<V1, V2, O> implements JoinTaskIterator<
public <BT, PT> MutableHashTable<BT, PT> getHashJoin(TypeSerializer<BT> buildSideSerializer, TypeComparator<BT> buildSideComparator,
TypeSerializer<PT> probeSideSerializer, TypeComparator<PT> probeSideComparator,
TypePairComparator<PT, BT> pairComparator,
- MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, long totalMemory)
+ MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, double memoryFraction)
throws MemoryAllocationException
{
- final int numPages = memManager.computeNumberOfPages(totalMemory);
+ final int numPages = memManager.computeNumberOfPages(memoryFraction);
final List<MemorySegment> memorySegments = memManager.allocatePages(ownerTask, numPages);
return new MutableHashTable<BT, PT>(buildSideSerializer, probeSideSerializer, buildSideComparator, probeSideComparator, pairComparator, memorySegments, ioManager);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstReOpenableHashMatchIterator.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstReOpenableHashMatchIterator.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstReOpenableHashMatchIterator.java
index d699462..8c2b9ca 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstReOpenableHashMatchIterator.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildFirstReOpenableHashMatchIterator.java
@@ -38,21 +38,21 @@ public class BuildFirstReOpenableHashMatchIterator<V1, V2, O> extends BuildFirst
TypeSerializer<V2> serializer2, TypeComparator<V2> comparator2,
TypePairComparator<V2, V1> pairComparator,
MemoryManager memManager, IOManager ioManager,
- AbstractInvokable ownerTask, long totalMemory)
+ AbstractInvokable ownerTask, double memoryFraction)
throws MemoryAllocationException {
super(firstInput, secondInput, serializer1, comparator1, serializer2,
comparator2, pairComparator, memManager, ioManager, ownerTask,
- totalMemory);
+ memoryFraction);
reopenHashTable = (ReOpenableMutableHashTable<V1, V2>) hashJoin;
}
public <BT, PT> MutableHashTable<BT, PT> getHashJoin(TypeSerializer<BT> buildSideSerializer, TypeComparator<BT> buildSideComparator,
TypeSerializer<PT> probeSideSerializer, TypeComparator<PT> probeSideComparator,
TypePairComparator<PT, BT> pairComparator,
- MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, long totalMemory)
+ MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, double memoryFraction)
throws MemoryAllocationException
{
- final int numPages = memManager.computeNumberOfPages(totalMemory);
+ final int numPages = memManager.computeNumberOfPages(memoryFraction);
final List<MemorySegment> memorySegments = memManager.allocatePages(ownerTask, numPages);
return new ReOpenableMutableHashTable<BT, PT>(buildSideSerializer, probeSideSerializer, buildSideComparator, probeSideComparator, pairComparator, memorySegments, ioManager);
}
[15/22] git commit: Cleanup of merge with slot-based scheduler branch.
Posted by se...@apache.org.
Cleanup of merge with slot-based scheduler branch.
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/429493d0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/429493d0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/429493d0
Branch: refs/heads/master
Commit: 429493d027700c3635c8045ad1087511e456d04f
Parents: 86d206c
Author: Stephan Ewen <se...@apache.org>
Authored: Thu Jun 19 00:15:46 2014 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:20 2014 +0200
----------------------------------------------------------------------
.../eu/stratosphere/client/program/Client.java | 17 +---
.../client/program/PackagedProgram.java | 1 +
.../stratosphere/client/program/ClientTest.java | 3 +-
.../compiler/costs/DefaultCostEstimator.java | 6 +-
.../compiler/dag/DataSourceNode.java | 2 +-
.../pact/compiler/DOPChangeTest.java | 8 +-
.../pact/compiler/IterationsCompilerTest.java | 4 +-
.../nephele/execution/RuntimeEnvironment.java | 72 +++++++++++--
.../nephele/protocols/JobManagerProtocol.java | 2 +-
.../stratosphere/nephele/taskmanager/Task.java | 70 +++----------
.../nephele/taskmanager/TaskManager.java | 10 +-
.../local/LocalInstanceManagerTest.java | 19 ++--
.../nephele/jobmanager/JobManagerITCase.java | 19 ++--
.../nephele/util/ServerTestUtils.java | 48 +--------
.../netty/InboundEnvelopeDecoderTest.java | 2 +-
.../confs/jobmanager/nephele-default.xml | 51 ----------
.../test/javaApiOperators/SumMinMaxITCase.java | 4 +-
.../PackagedProgramEndToEndITCase.java | 48 +++++----
.../test/util/testjar/KMeansForTest.java | 102 +++++--------------
19 files changed, 183 insertions(+), 305 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java b/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
index 31138f6..ec66f4a 100644
--- a/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
+++ b/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
@@ -78,9 +78,6 @@ public class Client {
configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerAddress.getPort());
this.compiler = new PactCompiler(new DataStatistics(), new DefaultCostEstimator());
-
- // Disable Local Execution when using a Client
- ContextEnvironment.disableLocalExecution();
}
/**
@@ -105,9 +102,6 @@ public class Client {
}
this.compiler = new PactCompiler(new DataStatistics(), new DefaultCostEstimator());
-
- // Disable Local Execution when using a Client
- ContextEnvironment.disableLocalExecution();
}
public void setPrintStatusDuringExecution(boolean print) {
@@ -152,20 +146,13 @@ public class Client {
ByteArrayOutputStream baes = new ByteArrayOutputStream();
System.setErr(new PrintStream(baes));
try {
+ ContextEnvironment.disableLocalExecution();
prog.invokeInteractiveModeForExecution();
}
catch (ProgramInvocationException e) {
- System.setOut(originalOut);
- System.setErr(originalErr);
- System.err.println(baes);
- System.out.println(baos);
throw e;
}
catch (Throwable t) {
- System.setOut(originalOut);
- System.setErr(originalErr);
- System.err.println(baes);
- System.out.println(baos);
// the invocation gets aborted with the preview plan
if (env.optimizerPlan != null) {
return env.optimizerPlan;
@@ -240,6 +227,8 @@ public class Client {
}
env.setAsContext();
+ ContextEnvironment.disableLocalExecution();
+
if (wait) {
// invoke here
prog.invokeInteractiveModeForExecution();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-clients/src/main/java/eu/stratosphere/client/program/PackagedProgram.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/main/java/eu/stratosphere/client/program/PackagedProgram.java b/stratosphere-clients/src/main/java/eu/stratosphere/client/program/PackagedProgram.java
index 51d2e34..edf36b3 100644
--- a/stratosphere-clients/src/main/java/eu/stratosphere/client/program/PackagedProgram.java
+++ b/stratosphere-clients/src/main/java/eu/stratosphere/client/program/PackagedProgram.java
@@ -215,6 +215,7 @@ public class PackagedProgram {
PreviewPlanEnvironment env = new PreviewPlanEnvironment();
env.setAsContext();
try {
+ ContextEnvironment.disableLocalExecution();
invokeInteractiveModeForExecution();
}
catch (ProgramInvocationException e) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java b/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
index 244ec4a..b3f8159 100644
--- a/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
+++ b/stratosphere-clients/src/test/java/eu/stratosphere/client/program/ClientTest.java
@@ -20,7 +20,6 @@ import static org.mockito.MockitoAnnotations.initMocks;
import static org.powermock.api.mockito.PowerMockito.whenNew;
import java.io.IOException;
-import java.net.InetSocketAddress;
import org.junit.Before;
import org.junit.Test;
@@ -95,7 +94,7 @@ public class ClientTest {
when(program.getPlanWithJars()).thenReturn(planWithJarsMock);
when(planWithJarsMock.getPlan()).thenReturn(planMock);
- whenNew(PactCompiler.class).withArguments(any(DataStatistics.class), any(CostEstimator.class), any(InetSocketAddress.class)).thenReturn(this.compilerMock);
+ whenNew(PactCompiler.class).withArguments(any(DataStatistics.class), any(CostEstimator.class)).thenReturn(this.compilerMock);
when(compilerMock.compile(planMock)).thenReturn(optimizedPlanMock);
whenNew(NepheleJobGraphGenerator.class).withNoArguments().thenReturn(generatorMock);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
index fde5970..3c52f6a 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
@@ -34,7 +34,7 @@ public class DefaultCostEstimator extends CostEstimator {
* The case of the estimation for all relative costs. We heuristically pick a very large data volume, which
* will favor strategies that are less expensive on large data volumes. This is robust and
*/
- private static final long HEURISTIC_COST_BASE = 10000000000l;
+ private static final long HEURISTIC_COST_BASE = 1000000000L;
// The numbers for the CPU effort are rather magic at the moment and should be seen rather ordinal
@@ -105,9 +105,9 @@ public class DefaultCostEstimator extends CostEstimator {
} else {
costs.addNetworkCost(replicationFactor * estOutShipSize);
}
- costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE * replicationFactor);
+ costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE * 10 * replicationFactor);
} else {
- costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE * 200);
+ costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE * 1000);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
index 7234420..b6d6b71 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
@@ -168,7 +168,7 @@ public class DataSourceNode extends OptimizerNode {
return this.cachedPlans;
}
- SourcePlanNode candidate = new SourcePlanNode(this, "DataSource("+this.getPactContract().getName()+")");
+ SourcePlanNode candidate = new SourcePlanNode(this, "DataSource ("+this.getPactContract().getName()+")");
candidate.updatePropertiesWithUniqueSets(getUniqueFields());
final Costs costs = new Costs();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/DOPChangeTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/DOPChangeTest.java b/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/DOPChangeTest.java
index 273c42c..605f197 100644
--- a/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/DOPChangeTest.java
+++ b/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/DOPChangeTest.java
@@ -209,15 +209,15 @@ public class DOPChangeTest extends CompilerTestBase {
ShipStrategyType mapIn = map2Node.getInput().getShipStrategy();
ShipStrategyType reduceIn = red2Node.getInput().getShipStrategy();
- Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.FORWARD, mapIn);
- Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.PARTITION_HASH, reduceIn);
+ Assert.assertTrue("Invalid ship strategy for an operator.",
+ (ShipStrategyType.PARTITION_RANDOM == mapIn && ShipStrategyType.PARTITION_HASH == reduceIn) ||
+ (ShipStrategyType.PARTITION_HASH == mapIn && ShipStrategyType.FORWARD == reduceIn));
}
@Test
- public void checkPropertyHandlingWithDecreasingDegreeOfParallelism()
- {
+ public void checkPropertyHandlingWithDecreasingDegreeOfParallelism() {
final int degOfPar = DEFAULT_PARALLELISM;
// construct the plan
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/IterationsCompilerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/IterationsCompilerTest.java b/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/IterationsCompilerTest.java
index 05a863c..c6ebf50 100644
--- a/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/IterationsCompilerTest.java
+++ b/stratosphere-compiler/src/test/java/eu/stratosphere/pact/compiler/IterationsCompilerTest.java
@@ -153,6 +153,7 @@ public class IterationsCompilerTest extends CompilerTestBase {
public void testIterationPushingWorkOut() throws Exception {
try {
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
+ env.setDegreeOfParallelism(8);
DataSet<Tuple2<Long, Long>> input1 = env.readCsvFile("/some/file/path").types(Long.class).map(new DuplicateValue());
@@ -168,6 +169,7 @@ public class IterationsCompilerTest extends CompilerTestBase {
BulkIterationPlanNode bipn = (BulkIterationPlanNode) op.getDataSinks().iterator().next().getInput().getSource();
+ // check that work has not! been pushed out, as the end of the step function does not produce the necessary properties
for (Channel c : bipn.getPartialSolutionPlanNode().getOutgoingChannels()) {
assertEquals(ShipStrategyType.PARTITION_HASH, c.getShipStrategy());
}
@@ -182,7 +184,7 @@ public class IterationsCompilerTest extends CompilerTestBase {
public static DataSet<Tuple2<Long, Long>> doBulkIteration(DataSet<Tuple2<Long, Long>> vertices, DataSet<Tuple2<Long, Long>> edges) {
// open a bulk iteration
- IterativeDataSet<Tuple2<Long, Long>> iteration = vertices.iterate(100);
+ IterativeDataSet<Tuple2<Long, Long>> iteration = vertices.iterate(20);
DataSet<Tuple2<Long, Long>> changes = iteration
.join(edges).where(0).equalTo(0).with(new Join222())
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
index cc542c0..4e07694 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
@@ -36,6 +36,7 @@ import eu.stratosphere.runtime.io.network.bufferprovider.GlobalBufferPool;
import eu.stratosphere.runtime.io.network.bufferprovider.LocalBufferPool;
import eu.stratosphere.runtime.io.network.bufferprovider.LocalBufferPoolOwner;
import eu.stratosphere.util.StringUtils;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -108,11 +109,6 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
private final AbstractInvokable invokable;
/**
- * The thread executing the task in the environment.
- */
- private volatile Thread executingThread = null;
-
- /**
* The ID of the job this task belongs to.
*/
private final JobID jobID;
@@ -136,6 +132,11 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
* The observer object for the task's execution.
*/
private volatile ExecutionObserver executionObserver = null;
+
+ /**
+ * The thread executing the task in the environment.
+ */
+ private volatile Thread executingThread;
/**
* The RPC proxy to report accumulators to JobManager
@@ -159,7 +160,9 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
private LocalBufferPool outputBufferPool;
- private Map<String,FutureTask<Path>> cacheCopyTasks = new HashMap<String, FutureTask<Path>>();
+ private final Map<String,FutureTask<Path>> cacheCopyTasks;
+
+ private volatile boolean canceled;
/**
* Creates a new runtime environment object which contains the runtime information for the encapsulated Nephele
@@ -174,8 +177,9 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
*/
public RuntimeEnvironment(final JobID jobID, final String taskName,
final Class<? extends AbstractInvokable> invokableClass, final Configuration taskConfiguration,
- final Configuration jobConfiguration) throws Exception {
-
+ final Configuration jobConfiguration)
+ throws Exception
+ {
this.jobID = jobID;
this.taskName = taskName;
this.invokableClass = invokableClass;
@@ -186,7 +190,8 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
this.memoryManager = null;
this.ioManager = null;
this.inputSplitProvider = null;
-
+ this.cacheCopyTasks = new HashMap<String, FutureTask<Path>>();
+
this.invokable = this.invokableClass.newInstance();
this.invokable.setEnvironment(this);
this.invokable.registerInputOutput();
@@ -433,6 +438,53 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
return this.executingThread;
}
}
+
+ public void cancelExecution() {
+ canceled = true;
+
+ LOG.info("Canceling " + getTaskNameWithIndex());
+
+ // Request user code to shut down
+ if (this.invokable != null) {
+ try {
+ this.invokable.cancel();
+ } catch (Throwable e) {
+ LOG.error("Error while cancelling the task.", e);
+ }
+ }
+
+ // interrupt the running thread and wait for it to die
+ executingThread.interrupt();
+
+ try {
+ executingThread.join(5000);
+ } catch (InterruptedException e) {}
+
+ if (!executingThread.isAlive()) {
+ return;
+ }
+
+ // Continuously interrupt the user thread until it changed to state CANCELED
+ while (executingThread != null && executingThread.isAlive()) {
+ LOG.warn("Task " + getTaskName() + " did not react to cancelling signal. Sending repeated interrupt.");
+
+ if (LOG.isDebugEnabled()) {
+ StringBuilder bld = new StringBuilder("Task ").append(getTaskName()).append(" is stuck in method:\n");
+
+ StackTraceElement[] stack = executingThread.getStackTrace();
+ for (StackTraceElement e : stack) {
+ bld.append(e).append('\n');
+ }
+ LOG.debug(bld.toString());
+ }
+
+ executingThread.interrupt();
+
+ try {
+ executingThread.join(1000);
+ } catch (InterruptedException e) {}
+ }
+ }
/**
* Blocks until all output channels are closed.
@@ -459,7 +511,7 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
*/
private void waitForInputChannelsToBeClosed() throws IOException, InterruptedException {
// Wait for disconnection of all output gates
- while (true) {
+ while (!canceled) {
// Make sure, we leave this method with an InterruptedException when the task has been canceled
if (this.executionObserver.isCanceled()) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
index 5070b51..4db5e14 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/protocols/JobManagerProtocol.java
@@ -51,7 +51,7 @@ public interface JobManagerProtocol extends VersionedProtocol {
* @return whether the task manager was successfully registered
*/
RegisterTaskManagerResult registerTaskManager(InstanceConnectionInfo instanceConnectionInfo,
- HardwareDescription hardwareDescription,IntegerRecord numberOfSlots)
+ HardwareDescription hardwareDescription, IntegerRecord numberOfSlots)
throws IOException;
/**
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/Task.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/Task.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/Task.java
index 825eae1..d1a6275 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/Task.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/Task.java
@@ -24,14 +24,14 @@ import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.profiling.TaskManagerProfiler;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
-import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.util.StringUtils;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.Iterator;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
public final class Task implements ExecutionObserver {
@@ -49,13 +49,14 @@ public final class Task implements ExecutionObserver {
/**
* Stores whether the task has been canceled.
*/
- private volatile boolean isCanceled = false;
+ private final AtomicBoolean canceled = new AtomicBoolean(false);
/**
* The current execution state of the task
*/
private volatile ExecutionState executionState = ExecutionState.STARTING;
+
private Queue<ExecutionListener> registeredListeners = new ConcurrentLinkedQueue<ExecutionListener>();
public Task(ExecutionVertexID vertexID, final RuntimeEnvironment environment, TaskManager taskManager) {
@@ -102,11 +103,11 @@ public final class Task implements ExecutionObserver {
}
public void cancelExecution() {
- cancelOrKillExecution(true);
+ cancelOrKillExecution();
}
public void killExecution() {
- cancelOrKillExecution(false);
+ cancelOrKillExecution();
}
/**
@@ -114,10 +115,8 @@ public final class Task implements ExecutionObserver {
*
* @param cancel <code>true</code> if the task shall be canceled, <code>false</code> if it shall be killed
*/
- private void cancelOrKillExecution(boolean cancel) {
- final Thread executingThread = this.environment.getExecutingThread();
-
- if (executingThread == null) {
+ private void cancelOrKillExecution() {
+ if (!this.canceled.compareAndSet(false, true)) {
return;
}
@@ -125,45 +124,13 @@ public final class Task implements ExecutionObserver {
return;
}
- LOG.info((cancel ? "Canceling " : "Killing ") + this.environment.getTaskNameWithIndex());
-
- if (cancel) {
- this.isCanceled = true;
- // Change state
- executionStateChanged(ExecutionState.CANCELING, null);
-
- // Request user code to shut down
- try {
- final AbstractInvokable invokable = this.environment.getInvokable();
- if (invokable != null) {
- invokable.cancel();
- }
- } catch (Throwable e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- }
-
- // Continuously interrupt the user thread until it changed to state CANCELED
- while (true) {
-
- executingThread.interrupt();
-
- if (!executingThread.isAlive()) {
- break;
- }
-
- try {
- executingThread.join(1000);
- } catch (InterruptedException e) {}
+ executionStateChanged(ExecutionState.CANCELING, null);
- if (!executingThread.isAlive()) {
- break;
- }
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Sending repeated " + (cancel == true ? "canceling" : "killing") + " signal to " +
- this.environment.getTaskName() + " with state " + this.executionState);
- }
+ // Request user code to shut down
+ try {
+ this.environment.cancelExecution();
+ } catch (Throwable e) {
+ LOG.error("Error while cancelling the task.", e);
}
}
@@ -271,7 +238,6 @@ public final class Task implements ExecutionObserver {
* @return the name of the task associated with this observer object
*/
private String getTaskName() {
-
return this.environment.getTaskName() + " (" + (this.environment.getIndexInSubtaskGroup() + 1) + "/"
+ this.environment.getCurrentNumberOfSubtasks() + ")";
}
@@ -279,7 +245,6 @@ public final class Task implements ExecutionObserver {
@Override
public void userThreadStarted(final Thread userThread) {
-
// Notify the listeners
final Iterator<ExecutionListener> it = this.registeredListeners.iterator();
while (it.hasNext()) {
@@ -290,7 +255,6 @@ public final class Task implements ExecutionObserver {
@Override
public void userThreadFinished(final Thread userThread) {
-
// Notify the listeners
final Iterator<ExecutionListener> it = this.registeredListeners.iterator();
while (it.hasNext()) {
@@ -307,7 +271,6 @@ public final class Task implements ExecutionObserver {
*/
public void registerExecutionListener(final ExecutionListener executionListener) {
-
this.registeredListeners.add(executionListener);
}
@@ -320,15 +283,13 @@ public final class Task implements ExecutionObserver {
*/
public void unregisterExecutionListener(final ExecutionListener executionListener) {
-
this.registeredListeners.remove(executionListener);
}
@Override
public boolean isCanceled() {
-
- return this.isCanceled;
+ return this.canceled.get();
}
/**
@@ -337,7 +298,6 @@ public final class Task implements ExecutionObserver {
* @return the runtime environment associated with this task
*/
public RuntimeEnvironment getRuntimeEnvironment() {
-
return this.environment;
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
index 5966cf9..f191df3 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
@@ -164,12 +164,15 @@ public class TaskManager implements TaskOperationProtocol {
private volatile boolean shutdownComplete;
/**
- * Constructs a new task manager, starts its IPC service and attempts to discover the job manager to
- * receive an initial configuration. All parameters are obtained from the
+ * All parameters are obtained from the
* {@link GlobalConfiguration}, which must be loaded prior to instantiating the task manager.
*/
public TaskManager(ExecutionMode executionMode) throws Exception {
-
+ if (executionMode == null) {
+ throw new NullPointerException("Execution mode must not be null.");
+ }
+
+
LOG.info("TaskManager started as user " + UserGroupInformation.getCurrentUser().getShortUserName());
LOG.info("User system property: " + System.getProperty("user.name"));
LOG.info("Execution mode: " + executionMode);
@@ -340,6 +343,7 @@ public class TaskManager implements TaskOperationProtocol {
{
HardwareDescription resources = HardwareDescriptionFactory.extractFromSystem();
+
numberOfSlots = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS,
Hardware.getNumberCPUCores());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
index a8f1331..b491c12 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
@@ -13,21 +13,19 @@
package eu.stratosphere.nephele.instance.local;
-import static org.junit.Assert.fail;
-
import eu.stratosphere.nephele.instance.InstanceManager;
import junit.framework.Assert;
import org.junit.Test;
import eu.stratosphere.nephele.ExecutionMode;
+import eu.stratosphere.configuration.ConfigConstants;
+import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.configuration.GlobalConfiguration;
import eu.stratosphere.nephele.jobmanager.JobManager;
-import eu.stratosphere.nephele.util.ServerTestUtils;
/**
* Tests for the {@link LocalInstanceManager}.
- *
*/
public class LocalInstanceManagerTest {
@@ -39,12 +37,13 @@ public class LocalInstanceManagerTest {
public void testInstanceTypeFromConfiguration() {
try {
- final String configDir = ServerTestUtils.getConfigDir();
- if (configDir == null) {
- fail("Cannot locate configuration directory");
- }
-
- GlobalConfiguration.loadConfiguration(configDir);
+ Configuration cfg = new Configuration();
+ cfg.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "127.0.0.1");
+ cfg.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);
+ cfg.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 1);
+ cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 1);
+
+ GlobalConfiguration.includeConfiguration(cfg);
// start JobManager
ExecutionMode executionMode = ExecutionMode.LOCAL;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
index fa4fbfa..89f7428 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
@@ -20,6 +20,7 @@ import eu.stratosphere.core.fs.Path;
import eu.stratosphere.nephele.ExecutionMode;
import eu.stratosphere.nephele.client.JobClient;
import eu.stratosphere.nephele.client.JobExecutionException;
+import eu.stratosphere.nephele.execution.RuntimeEnvironment;
import eu.stratosphere.nephele.jobgraph.DistributionPattern;
import eu.stratosphere.runtime.io.channels.ChannelType;
import eu.stratosphere.nephele.jobgraph.JobFileInputVertex;
@@ -34,6 +35,7 @@ import eu.stratosphere.nephele.util.FileLineWriter;
import eu.stratosphere.nephele.util.JarFileCreator;
import eu.stratosphere.nephele.util.ServerTestUtils;
import eu.stratosphere.util.LogUtils;
+
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.AfterClass;
@@ -57,7 +59,8 @@ import static org.junit.Assert.fail;
public class JobManagerITCase {
static {
- LogUtils.initializeDefaultTestConsoleLogger();
+ // no logging, because the tests create expected exceptions
+ LogUtils.initializeDefaultConsoleLogger(Level.INFO);
}
/**
@@ -75,7 +78,13 @@ public class JobManagerITCase {
@BeforeClass
public static void startNephele() {
try {
- GlobalConfiguration.loadConfiguration(ServerTestUtils.getConfigDir());
+ Configuration cfg = new Configuration();
+ cfg.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "127.0.0.1");
+ cfg.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);
+ cfg.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 1);
+ cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 1);
+
+ GlobalConfiguration.includeConfiguration(cfg);
configuration = GlobalConfiguration.getConfiguration(new String[] { ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY });
@@ -301,8 +310,9 @@ public class JobManagerITCase {
// deactivate logging of expected test exceptions
Logger rtLogger = Logger.getLogger(Task.class);
- Level rtLevel = rtLogger.getEffectiveLevel();
rtLogger.setLevel(Level.OFF);
+ Logger envLogger = Logger.getLogger(RuntimeEnvironment.class);
+ envLogger.setLevel(Level.DEBUG);
try {
jobClient.submitJobAndWait();
@@ -317,9 +327,6 @@ public class JobManagerITCase {
return;
}
- finally {
- rtLogger.setLevel(rtLevel);
- }
fail("Expected exception but did not receive it");
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
index 4202880..59de8cc 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
@@ -38,27 +38,9 @@ import eu.stratosphere.nephele.protocols.ExtendedManagementProtocol;
public final class ServerTestUtils {
/**
- * The system property key to retrieve the user directory.
- */
- private static final String USER_DIR_KEY = "user.dir";
-
- /**
- * The directory containing the correct configuration file to be used during the tests.
- */
- private static final String CORRECT_CONF_DIR = "/confs/jobmanager";
-
- /**
- * The directory the configuration directory is expected in when test are executed using Eclipse.
- */
- private static final String ECLIPSE_PATH_EXTENSION = "/src/test/resources";
-
- private static final String INTELLIJ_PATH_EXTENSION = "/stratosphere-runtime/src/test/resources";
-
- /**
* Private constructor.
*/
- private ServerTestUtils() {
- }
+ private ServerTestUtils() {}
/**
* Creates a file with a random name in the given sub directory within the directory for temporary files. The
@@ -182,34 +164,6 @@ public final class ServerTestUtils {
}
/**
- * Returns the directory containing the configuration files that shall be used for the test.
- *
- * @return the directory containing the configuration files or <code>null</code> if the configuration directory
- * could not be located
- */
- public static String getConfigDir() {
-
- // This is the correct path for Maven-based tests
- String configDir = System.getProperty(USER_DIR_KEY) + CORRECT_CONF_DIR;
- if (new File(configDir).exists()) {
- return configDir;
- }
-
- configDir = System.getProperty(USER_DIR_KEY) + ECLIPSE_PATH_EXTENSION + CORRECT_CONF_DIR;
- if (new File(configDir).exists()) {
- return configDir;
- }
-
- configDir = System.getProperty(USER_DIR_KEY) + INTELLIJ_PATH_EXTENSION + CORRECT_CONF_DIR;
-
- if(new File(configDir).exists()){
- return configDir;
- }
-
- return null;
- }
-
- /**
* Waits until the job manager for the tests has become ready to accept jobs.
*
* @param jobManager
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
index 1ee9293..1c6270a 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/netty/InboundEnvelopeDecoderTest.java
@@ -354,7 +354,7 @@ public class InboundEnvelopeDecoderTest {
buf.readerIndex(0);
ByteBuf[] slices = randomSlices(buf);
- ch.writeInbound((Object) slices);
+ ch.writeInbound((Object[]) slices);
for (ByteBuf slice : slices) {
Assert.assertEquals(1, slice.refCnt());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-runtime/src/test/resources/confs/jobmanager/nephele-default.xml
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/resources/confs/jobmanager/nephele-default.xml b/stratosphere-runtime/src/test/resources/confs/jobmanager/nephele-default.xml
deleted file mode 100644
index 5d93d95..0000000
--- a/stratosphere-runtime/src/test/resources/confs/jobmanager/nephele-default.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
- <property>
- <key>jobmanager.rpc.address</key>
- <value>127.0.0.1</value>
- </property>
- <property>
- <key>jobmanager.rpc.port</key>
- <value>6123</value>
- </property>
- <property>
- <key>taskmanager.setup.usediscovery</key>
- <value>false</value>
- </property>
- <property>
- <key>discoveryservice.magicnumber</key>
- <value>12300</value>
- </property>
- <property>
- <key>instancemanager.local.classname</key>
- <value>eu.stratosphere.nephele.instance.local.LocalInstanceManager</value>
- </property>
- <property>
- <key>jobmanager.scheduler.local.classname</key>
- <value>eu.stratosphere.nephele.jobmanager.scheduler.local.LocalScheduler</value>
- </property>
- <property>
- <key>channel.network.compressor</key>
- <value>de.tu_berlin.cit.nephele.io.compression.lzo.LzoCompressor</value>
- </property>
- <property>
- <key>channel.network.decompressor</key>
- <value>de.tu_berlin.cit.nephele.io.compression.lzo.LzoDecompressor</value>
- </property>
- <property>
- <key>channel.file.compressor</key>
- <value>de.tu_berlin.cit.nephele.io.compression.lzo.LzoCompressor</value>
- </property>
- <property>
- <key>channel.file.decompressor</key>
- <value>de.tu_berlin.cit.nephele.io.compression.lzo.LzoDecompressor</value>
- </property>
- <property>
- <key>taskmanager.memory.size</key>
- <value>8</value>
- </property>
- <property>
- <key>instancemanager.local.type</key>
- <value>test,4,4,1024,160,0</value>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-tests/src/test/java/eu/stratosphere/test/javaApiOperators/SumMinMaxITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/javaApiOperators/SumMinMaxITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/javaApiOperators/SumMinMaxITCase.java
index 8b7dc80..cef9c05 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/javaApiOperators/SumMinMaxITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/javaApiOperators/SumMinMaxITCase.java
@@ -18,13 +18,14 @@ package eu.stratosphere.test.javaApiOperators;
import eu.stratosphere.api.java.DataSet;
import eu.stratosphere.api.java.ExecutionEnvironment;
-import eu.stratosphere.api.java.aggregation.Aggregations;
import eu.stratosphere.api.java.tuple.Tuple1;
import eu.stratosphere.api.java.tuple.Tuple2;
import eu.stratosphere.api.java.tuple.Tuple3;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.test.javaApiOperators.util.CollectionDataSets;
import eu.stratosphere.test.util.JavaProgramTestBase;
+
+import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.FileNotFoundException;
@@ -32,6 +33,7 @@ import java.io.IOException;
import java.util.Collection;
import java.util.LinkedList;
+@RunWith(Parameterized.class)
public class SumMinMaxITCase extends JavaProgramTestBase {
private static int NUM_PROGRAMS = 3;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
index 0e297ec..17f4a29 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
@@ -14,7 +14,6 @@ package eu.stratosphere.test.localDistributed;
import java.io.File;
import java.io.FileWriter;
-import java.net.URL;
import eu.stratosphere.client.minicluster.NepheleMiniCluster;
import org.junit.Assert;
@@ -29,23 +28,26 @@ import eu.stratosphere.util.LogUtils;
public class PackagedProgramEndToEndITCase {
- private static final int DOP = 4;
-
static {
LogUtils.initializeDefaultTestConsoleLogger();
}
@Test
public void testEverything() {
+ final int PORT = 6498;
+
NepheleMiniCluster cluster = new NepheleMiniCluster();
+
+ File points = null;
+ File clusters = null;
+ File outFile = null;
+
try {
// set up the files
- File points = File.createTempFile("kmeans_points", ".in");
- File clusters = File.createTempFile("kmeans_clusters", ".in");
- File outFile = File.createTempFile("kmeans_result", ".out");
- points.deleteOnExit();
- clusters.deleteOnExit();
- outFile.deleteOnExit();
+ points = File.createTempFile("kmeans_points", ".in");
+ clusters = File.createTempFile("kmeans_clusters", ".in");
+ outFile = File.createTempFile("kmeans_result", ".out");
+
outFile.delete();
FileWriter fwPoints = new FileWriter(points);
@@ -56,31 +58,39 @@ public class PackagedProgramEndToEndITCase {
fwClusters.write(KMeansData.INITIAL_CENTERS);
fwClusters.close();
- URL jarFileURL = getClass().getResource("/KMeansForTest.jar");
- String jarPath = jarFileURL.getFile();
+ String jarPath = "target/maven-test-jar.jar";
// run KMeans
cluster.setNumTaskTracker(2);
cluster.setTaskManagerNumSlots(2);
+ cluster.setJobManagerRpcPort(PORT);
cluster.start();
- RemoteExecutor ex = new RemoteExecutor("localhost", 6498);
+
+ RemoteExecutor ex = new RemoteExecutor("localhost", PORT);
ex.executeJar(jarPath,
- "eu.stratosphere.examples.scala.testing.KMeansForTest",
- new String[] {new Integer(DOP).toString(),
+ "eu.stratosphere.test.util.testjar.KMeansForTest",
+ new String[] {
points.toURI().toString(),
clusters.toURI().toString(),
outFile.toURI().toString(),
"25"});
- points.delete();
- clusters.delete();
- outFile.delete();
-
} catch (Exception e) {
e.printStackTrace();
Assert.fail(e.getMessage());
- } finally {
+ }
+ finally {
+ if (points != null) {
+ points.delete();
+ }
+ if (cluster != null) {
+ clusters.delete();
+ }
+ if (outFile != null) {
+ outFile.delete();
+ }
+
try {
cluster.stop();
} catch (Exception e) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/429493d0/stratosphere-tests/src/test/java/eu/stratosphere/test/util/testjar/KMeansForTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/util/testjar/KMeansForTest.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/util/testjar/KMeansForTest.java
index d1b249a..8047649 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/util/testjar/KMeansForTest.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/util/testjar/KMeansForTest.java
@@ -23,7 +23,6 @@ import eu.stratosphere.api.common.Program;
import eu.stratosphere.api.java.DataSet;
import eu.stratosphere.api.java.ExecutionEnvironment;
import eu.stratosphere.api.java.IterativeDataSet;
-import eu.stratosphere.api.java.RemoteEnvironment;
import eu.stratosphere.api.java.functions.MapFunction;
import eu.stratosphere.api.java.functions.ReduceFunction;
import eu.stratosphere.api.java.tuple.Tuple2;
@@ -31,25 +30,41 @@ import eu.stratosphere.api.java.tuple.Tuple3;
import eu.stratosphere.configuration.Configuration;
@SuppressWarnings("serial")
-public class KMeansForTest implements Program{
+public class KMeansForTest implements Program {
// *************************************************************************
// PROGRAM
// *************************************************************************
+
@Override
public Plan getPlan(String... args) {
- if(!parseParameters(args)) {
- throw new RuntimeException("Unable to parse the arguments");
+ if (args.length < 4) {
+ throw new IllegalArgumentException("Missing parameters");
}
-
- // set up execution environment
- ExecutionEnvironment env = new RemoteEnvironment("localhost", 1);
+
+ final String pointsPath = args[0];
+ final String centersPath = args[1];
+ final String outputPath = args[2];
+ final int numIterations = Integer.parseInt(args[3]);
+
+
+ ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
+ env.setDegreeOfParallelism(4);
// get input data
- DataSet<Point> points = getPointDataSet(env);
- DataSet<Centroid> centroids = getCentroidDataSet(env);
+ DataSet<Point> points = env.readCsvFile(pointsPath)
+ .fieldDelimiter('|')
+ .includeFields(true, true)
+ .types(Double.class, Double.class)
+ .map(new TuplePointConverter());
+
+ DataSet<Centroid> centroids = env.readCsvFile(centersPath)
+ .fieldDelimiter('|')
+ .includeFields(true, true, true)
+ .types(Integer.class, Double.class, Double.class)
+ .map(new TupleCentroidConverter());
// set number of bulk iterations for KMeans algorithm
IterativeDataSet<Centroid> loop = centroids.iterate(numIterations);
@@ -71,11 +86,8 @@ public class KMeansForTest implements Program{
.map(new SelectNearestCenter()).withBroadcastSet(finalCentroids, "centroids");
// emit result
- if(fileOutput) {
- clusteredPoints.writeAsCsv(outputPath, "\n", " ");
- } else {
- clusteredPoints.print();
- }
+ clusteredPoints.writeAsCsv(outputPath, "\n", " ");
+
return env.createProgramPlan();
}
@@ -229,66 +241,4 @@ public class KMeansForTest implements Program{
return new Centroid(value.f0, value.f1.div(value.f2));
}
}
-
- // *************************************************************************
- // UTIL METHODS
- // *************************************************************************
-
- private static boolean fileOutput = false;
- private static String pointsPath = null;
- private static String centersPath = null;
- private static String outputPath = null;
- private static int numIterations = 10;
-
- private static boolean parseParameters(String[] programArguments) {
-
- if(programArguments.length > 0) {
- // parse input arguments
- fileOutput = true;
- if(programArguments.length == 4) {
- pointsPath = programArguments[0];
- centersPath = programArguments[1];
- outputPath = programArguments[2];
- numIterations = Integer.parseInt(programArguments[3]);
- } else {
- System.err.println("Usage: KMeans <points path> <centers path> <result path> <num iterations>");
- return false;
- }
- } else {
- System.out.println("Executing K-Means example with default parameters and built-in default data.");
- System.out.println(" Provide parameters to read input data from files.");
- System.out.println(" See the documentation for the correct format of input files.");
- System.out.println(" We provide a data generator to create synthetic input files for this program.");
- System.out.println(" Usage: KMeans <points path> <centers path> <result path> <num iterations>");
- }
- return true;
- }
-
- private static DataSet<Point> getPointDataSet(ExecutionEnvironment env) {
- if(fileOutput) {
- // read points from CSV file
- return env.readCsvFile(pointsPath)
- .fieldDelimiter('|')
- .includeFields(true, true)
- .types(Double.class, Double.class)
- .map(new TuplePointConverter());
- } else {
- throw new UnsupportedOperationException("Use file output");
- }
- }
-
- private static DataSet<Centroid> getCentroidDataSet(ExecutionEnvironment env) {
- if(fileOutput) {
- return env.readCsvFile(centersPath)
- .fieldDelimiter('|')
- .includeFields(true, true, true)
- .types(Integer.class, Double.class, Double.class)
- .map(new TupleCentroidConverter());
- } else {
- throw new UnsupportedOperationException("Use file output");
- }
- }
-
-
-
}
[08/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/AllocatedSlice.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/AllocatedSlice.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/AllocatedSlice.java
deleted file mode 100644
index 88d71e0..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/AllocatedSlice.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import eu.stratosphere.nephele.instance.AllocationID;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.jobgraph.JobID;
-
-/**
- * An allocated slice is a part of an instance which is assigned to a job.
- * <p>
- * This class is thread-safe.
- *
- */
-class AllocatedSlice {
-
- /**
- * The allocation ID which identifies the resources occupied by this slice.
- */
- private final AllocationID allocationID;
-
- /**
- * The machine hosting the slice.
- */
- private final ClusterInstance hostingInstance;
-
- /**
- * The type describing the characteristics of the allocated slice.
- */
- private final InstanceType type;
-
- /**
- * The ID of the job this slice belongs to.
- */
- private final JobID jobID;
-
- /**
- * Time when this machine has been allocation in milliseconds, {@see currentTimeMillis()}.
- */
- private final long allocationTime;
-
- /**
- * Creates a new allocated slice on the given hosting instance.
- *
- * @param hostingInstance
- * the instance hosting the slice
- * @param type
- * the type describing the characteristics of the allocated slice
- * @param jobID
- * the ID of the job this slice belongs to
- * @param allocationTime
- * the time the instance was allocated
- */
- public AllocatedSlice(final ClusterInstance hostingInstance, final InstanceType type, final JobID jobID,
- final long allocationTime) {
-
- this.allocationID = new AllocationID();
- this.hostingInstance = hostingInstance;
- this.type = type;
- this.jobID = jobID;
- this.allocationTime = allocationTime;
- }
-
- /**
- * Returns the allocation ID of this slice.
- *
- * @return the allocation ID of this slice
- */
- public AllocationID getAllocationID() {
- return this.allocationID;
- }
-
- /**
- * The type describing the characteristics of
- * this allocated slice.
- *
- * @return the type describing the characteristics of the slice
- */
- public InstanceType getType() {
- return this.type;
- }
-
- /**
- * Returns the time the instance was allocated.
- *
- * @return the time the instance was allocated
- */
- public long getAllocationTime() {
- return this.allocationTime;
- }
-
- /**
- * Returns the ID of the job this allocated slice belongs to.
- *
- * @return the ID of the job this allocated slice belongs to
- */
- public JobID getJobID() {
- return this.jobID;
- }
-
- /**
- * Returns the instance hosting this slice.
- *
- * @return the instance hosting this slice
- */
- public ClusterInstance getHostingInstance() {
- return this.hostingInstance;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstance.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstance.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstance.java
deleted file mode 100644
index 5c50bd3..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstance.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.AllocationID;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.nephele.topology.NetworkNode;
-import eu.stratosphere.nephele.topology.NetworkTopology;
-
-/**
- * Representation of a host of a compute cluster.
- * <p>
- * This class is thread-safe.
- *
- */
-class ClusterInstance extends AbstractInstance {
-
- /**
- * A map of slices allocated on this host.
- */
- private final Map<AllocationID, AllocatedSlice> allocatedSlices = new HashMap<AllocationID, AllocatedSlice>();
-
- /**
- * The remaining capacity of this host that can be used by instances.
- */
- private InstanceType remainingCapacity;
-
- /**
- * Time when last heat beat has been received from the task manager running on this instance.
- */
- private long lastReceivedHeartBeat = System.currentTimeMillis();
-
- /**
- * Constructs a new cluster instance.
- *
- * @param instanceConnectionInfo
- * the instance connection info identifying the host
- * @param capacity
- * capacity of this host
- * @param parentNode
- * the parent node of this node in the network topology
- * @param networkTopology
- * the network topology this node is part of
- * @param hardwareDescription
- * the hardware description reported by the instance itself
- */
- public ClusterInstance(final InstanceConnectionInfo instanceConnectionInfo, final InstanceType capacity,
- final NetworkNode parentNode, final NetworkTopology networkTopology,
- final HardwareDescription hardwareDescription) {
-
- super(capacity, instanceConnectionInfo, parentNode, networkTopology, hardwareDescription);
-
- this.remainingCapacity = capacity;
- }
-
- /**
- * Updates the time of last received heart beat to the current system time.
- */
- synchronized void reportHeartBeat() {
- this.lastReceivedHeartBeat = System.currentTimeMillis();
- }
-
- /**
- * Returns whether the host is still alive.
- *
- * @param cleanUpInterval
- * duration (in milliseconds) after which a host is
- * considered dead if it has no received heat-beats.
- * @return <code>true</code> if the host has received a heat-beat before the <code>cleanUpInterval</code> duration
- * has expired, <code>false</code> otherwise
- */
- synchronized boolean isStillAlive(final long cleanUpInterval) {
-
- if (this.lastReceivedHeartBeat + cleanUpInterval < System.currentTimeMillis()) {
- return false;
- }
- return true;
- }
-
- /**
- * Tries to create a new slice on this instance.
- *
- * @param reqType
- * the type describing the hardware characteristics of the slice
- * @param jobID
- * the ID of the job the new slice belongs to
- * @return a new {@AllocatedSlice} object if a slice with the given hardware characteristics could
- * still be accommodated on this instance or <code>null</code> if the instance's remaining resources
- * were insufficient to host the desired slice
- */
- synchronized AllocatedSlice createSlice(final InstanceType reqType, final JobID jobID) {
-
- // check whether we can accommodate the instance
- if (remainingCapacity.getNumberOfComputeUnits() >= reqType.getNumberOfComputeUnits()
- && remainingCapacity.getNumberOfCores() >= reqType.getNumberOfCores()
- && remainingCapacity.getMemorySize() >= reqType.getMemorySize()
- && remainingCapacity.getDiskCapacity() >= reqType.getDiskCapacity()) {
-
- // reduce available capacity by what has been requested
- remainingCapacity = InstanceTypeFactory.construct(remainingCapacity.getIdentifier(), remainingCapacity
- .getNumberOfComputeUnits()
- - reqType.getNumberOfComputeUnits(), remainingCapacity.getNumberOfCores() - reqType.getNumberOfCores(),
- remainingCapacity.getMemorySize() - reqType.getMemorySize(), remainingCapacity.getDiskCapacity()
- - reqType.getDiskCapacity(), remainingCapacity.getPricePerHour());
-
- final long allocationTime = System.currentTimeMillis();
-
- final AllocatedSlice slice = new AllocatedSlice(this, reqType, jobID, allocationTime);
- this.allocatedSlices.put(slice.getAllocationID(), slice);
- return slice;
- }
-
- // we cannot accommodate the instance
- return null;
- }
-
- /**
- * Removes the slice identified by the given allocation ID from
- * this instance and frees up the allocated resources.
- *
- * @param allocationID
- * the allocation ID of the slice to be removed
- * @return the slice with has been removed from the instance or <code>null</code> if no slice
- * with the given allocation ID could be found
- */
- synchronized AllocatedSlice removeAllocatedSlice(final AllocationID allocationID) {
-
- final AllocatedSlice slice = this.allocatedSlices.remove(allocationID);
- if (slice != null) {
-
- this.remainingCapacity = InstanceTypeFactory.construct(this.remainingCapacity.getIdentifier(),
- this.remainingCapacity
- .getNumberOfComputeUnits()
- + slice.getType().getNumberOfComputeUnits(), this.remainingCapacity.getNumberOfCores()
- + slice.getType().getNumberOfCores(), this.remainingCapacity.getMemorySize()
- + slice.getType().getMemorySize(), this.remainingCapacity.getDiskCapacity()
- + slice.getType().getDiskCapacity(), this.remainingCapacity.getPricePerHour());
- }
-
- return slice;
- }
-
- /**
- * Removes all allocated slices on this instance and frees
- * up their allocated resources.
- *
- * @return a list of all removed slices
- */
- synchronized List<AllocatedSlice> removeAllAllocatedSlices() {
-
- final List<AllocatedSlice> slices = new ArrayList<AllocatedSlice>(this.allocatedSlices.values());
- final Iterator<AllocatedSlice> it = slices.iterator();
- while (it.hasNext()) {
- removeAllocatedSlice(it.next().getAllocationID());
- }
-
- return slices;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstanceNotifier.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstanceNotifier.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstanceNotifier.java
deleted file mode 100644
index 39d2132..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterInstanceNotifier.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import java.util.List;
-
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.jobgraph.JobID;
-
-/**
- * This class is an auxiliary class to send the notification
- * about the availability of an {@link AbstractInstance} to the given {@link InstanceListener} object. The notification
- * must be sent from
- * a separate thread, otherwise the atomic operation of requesting an instance
- * for a vertex and switching to the state ASSINING could not be guaranteed.
- * This class is thread-safe.
- *
- */
-public class ClusterInstanceNotifier extends Thread {
-
- /**
- * The {@link InstanceListener} object to send the notification to.
- */
- private final InstanceListener instanceListener;
-
- /**
- * The ID of the job the notification refers to.
- */
- private final JobID jobID;
-
- /**
- * The allocated resources the notification refers to.
- */
- private final List<AllocatedResource> allocatedResources;
-
- /**
- * Constructs a new instance notifier object.
- *
- * @param instanceListener
- * the listener to send the notification to
- * @param jobID
- * the ID of the job the notification refers to
- * @param allocatedResources
- * the resources with has been allocated for the job
- */
- public ClusterInstanceNotifier(final InstanceListener instanceListener, final JobID jobID,
- final List<AllocatedResource> allocatedResources) {
- this.instanceListener = instanceListener;
- this.jobID = jobID;
- this.allocatedResources = allocatedResources;
- }
-
-
- @Override
- public void run() {
-
- this.instanceListener.resourcesAllocated(this.jobID, this.allocatedResources);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterManager.java
deleted file mode 100644
index 480e521..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/ClusterManager.java
+++ /dev/null
@@ -1,945 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import eu.stratosphere.configuration.ConfigConstants;
-import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.configuration.GlobalConfiguration;
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.HardwareDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.instance.InstanceTypeDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.nephele.topology.NetworkNode;
-import eu.stratosphere.nephele.topology.NetworkTopology;
-import eu.stratosphere.nephele.util.SerializableHashMap;
-
-/**
- * Instance Manager for a static cluster.
- * <p>
- * The cluster manager can handle heterogeneous instances (compute nodes). Each instance type used in the cluster must
- * be described in the configuration.
- * <p>
- * This is a sample configuration: <code>
- * # definition of instances in format
- * # instancename,numComputeUnits,numCores,memorySize,diskCapacity,pricePerHour
- * instancemanager.cluster.type.1 = m1.small,2,1,2048,10,10
- * instancemanager.cluster.type. = c1.medium,2,1,2048,10,10
- * instancemanager.cluster.type. = m1.large,4,2,2048,10,10
- * instancemanager.cluster.type. = m1.xlarge,8,4,8192,20,20
- * instancemanager.cluster.type. = c1.xlarge,8,4,16384,20,40
- *
- * # default instance type
- * instancemanager.cluster.defaulttype = 1 (pointing to m1.small)
- * </code> Each instance is expected to run exactly one {@link eu.stratosphere.nephele.taskmanager.TaskManager}. When
- * the {@link eu.stratosphere.nephele.taskmanager.TaskManager} registers with the
- * {@link eu.stratosphere.nephele.jobmanager.JobManager} it sends a {@link HardwareDescription} which describes the
- * actual hardware characteristics of the instance (compute node). The cluster manage will attempt to match the report
- * hardware characteristics with one of the configured instance types. Moreover, the cluster manager is capable of
- * partitioning larger instances (compute nodes) into smaller, less powerful instances.
- */
-public class ClusterManager implements InstanceManager {
-
- // ------------------------------------------------------------------------
- // Internal Constants
- // ------------------------------------------------------------------------
-
- /**
- * The log object used to report debugging and error information.
- */
- private static final Log LOG = LogFactory.getLog(ClusterManager.class);
-
- /**
- * Default duration after which a host is purged in case it did not send
- * a heart-beat message.
- */
- private static final int DEFAULT_CLEANUP_INTERVAL = 2 * 60; // 2 min.
-
- /**
- * The key to retrieve the clean up interval from the configuration.
- */
- private static final String CLEANUP_INTERVAL_KEY = "instancemanager.cluster.cleanupinterval";
-
- // ------------------------------------------------------------------------
- // Fields
- // ------------------------------------------------------------------------
-
- private final Object lock = new Object();
-
- /**
- * Duration after which a host is purged in case it did not send a
- * heart-beat message.
- */
- private final long cleanUpInterval;
-
- /**
- * The default instance type.
- */
- private final InstanceType defaultInstanceType;
-
- /**
- * Set of hosts known to run a task manager that are thus able to execute
- * tasks.
- */
- private final Map<InstanceConnectionInfo, ClusterInstance> registeredHosts;
-
- /**
- * Map of a {@link JobID} to all {@link AllocatedSlice}s that belong to this job.
- */
- private final Map<JobID, List<AllocatedSlice>> slicesOfJobs;
-
- /**
- * List of instance types that can be executed on this cluster, sorted by
- * price (cheapest to most expensive).
- */
- private final InstanceType[] availableInstanceTypes;
-
- /**
- * Map of instance type descriptions which can be queried by the job manager.
- */
- private final Map<InstanceType, InstanceTypeDescription> instanceTypeDescriptionMap;
-
- /**
- * Map of IP addresses to instance types.
- */
- private final Map<InetAddress, InstanceType> ipToInstanceTypeMapping = new HashMap<InetAddress, InstanceType>();
-
- /**
- * Map of pending requests of a job, i.e. the instance requests that could not be fulfilled during the initial
- * instance request.
- */
- private final Map<JobID, PendingRequestsMap> pendingRequestsOfJob = new LinkedHashMap<JobID, PendingRequestsMap>();
-
- /**
- * The network topology of the cluster.
- */
- private final NetworkTopology networkTopology;
-
- /**
- * Object that is notified if instances become available or vanish.
- */
- private InstanceListener instanceListener;
-
- /**
- * Matrix storing how many instances of a particular type and be accommodated in another instance type.
- */
- private final int[][] instanceAccommodationMatrix;
-
- private boolean shutdown;
-
- /**
- * Periodic task that checks whether hosts have not sent their heart-beat
- * messages and purges the hosts in this case.
- */
- private final TimerTask cleanupStaleMachines = new TimerTask() {
-
- @Override
- public void run() {
-
- synchronized (ClusterManager.this.lock) {
-
- final List<Map.Entry<InstanceConnectionInfo, ClusterInstance>> hostsToRemove =
- new ArrayList<Map.Entry<InstanceConnectionInfo, ClusterInstance>>();
-
- final Map<JobID, List<AllocatedResource>> staleResources = new HashMap<JobID, List<AllocatedResource>>();
-
- // check all hosts whether they did not send heat-beat messages.
- for (Map.Entry<InstanceConnectionInfo, ClusterInstance> entry : registeredHosts.entrySet()) {
-
- final ClusterInstance host = entry.getValue();
- if (!host.isStillAlive(cleanUpInterval)) {
-
- // this host has not sent the heat-beat messages
- // -> we terminate all instances running on this host and notify the jobs
- final List<AllocatedSlice> removedSlices = host.removeAllAllocatedSlices();
- for (AllocatedSlice removedSlice : removedSlices) {
-
- final JobID jobID = removedSlice.getJobID();
- final List<AllocatedSlice> slicesOfJob = slicesOfJobs.get(jobID);
- if (slicesOfJob == null) {
- LOG.error("Cannot find allocated slices for job with ID + " + jobID);
- continue;
- }
-
- slicesOfJob.remove(removedSlice);
-
- // Clean up
- if (slicesOfJob.isEmpty()) {
- slicesOfJobs.remove(jobID);
- }
-
- List<AllocatedResource> staleResourcesOfJob = staleResources.get(removedSlice.getJobID());
- if (staleResourcesOfJob == null) {
- staleResourcesOfJob = new ArrayList<AllocatedResource>();
- staleResources.put(removedSlice.getJobID(), staleResourcesOfJob);
- }
-
- staleResourcesOfJob.add(new AllocatedResource(removedSlice.getHostingInstance(),
- removedSlice.getType(),
- removedSlice.getAllocationID()));
- }
-
- hostsToRemove.add(entry);
- }
- }
-
- registeredHosts.entrySet().removeAll(hostsToRemove);
-
- updateInstaceTypeDescriptionMap();
-
- final Iterator<Map.Entry<JobID, List<AllocatedResource>>> it = staleResources.entrySet().iterator();
- while (it.hasNext()) {
- final Map.Entry<JobID, List<AllocatedResource>> entry = it.next();
- if (instanceListener != null) {
- instanceListener.allocatedResourcesDied(entry.getKey(), entry.getValue());
- }
- }
- }
- }
- };
-
- // ------------------------------------------------------------------------
- // Constructor and set-up
- // ------------------------------------------------------------------------
-
- /**
- * Constructor.
- */
- public ClusterManager() {
-
- this.registeredHosts = new HashMap<InstanceConnectionInfo, ClusterInstance>();
-
- this.slicesOfJobs = new HashMap<JobID, List<AllocatedSlice>>();
-
- // Load the instance type this cluster can offer
- this.defaultInstanceType = InstanceTypeFactory.constructFromDescription(ConfigConstants.DEFAULT_INSTANCE_TYPE);
-
- this.availableInstanceTypes = new InstanceType[] { this.defaultInstanceType };
-
- this.instanceAccommodationMatrix = calculateInstanceAccommodationMatrix();
-
- this.instanceTypeDescriptionMap = new SerializableHashMap<InstanceType, InstanceTypeDescription>();
-
- long tmpCleanUpInterval = (long) GlobalConfiguration.getInteger(CLEANUP_INTERVAL_KEY, DEFAULT_CLEANUP_INTERVAL) * 1000;
-
- if (tmpCleanUpInterval < 10) { // Clean up interval must be at least ten seconds
- LOG.warn("Invalid clean up interval. Reverting to default cleanup interval of " + DEFAULT_CLEANUP_INTERVAL
- + " secs.");
- tmpCleanUpInterval = DEFAULT_CLEANUP_INTERVAL;
- }
-
- this.cleanUpInterval = tmpCleanUpInterval;
-
- // sort available instances by CPU core
- sortAvailableInstancesByNumberOfCPUCores();
-
- this.networkTopology = NetworkTopology.createEmptyTopology();
-
- // look every BASEINTERVAL milliseconds for crashed hosts
- final boolean runTimerAsDaemon = true;
- new Timer(runTimerAsDaemon).schedule(cleanupStaleMachines, 1000, 1000);
-
- // Load available instance types into the instance description list
- updateInstaceTypeDescriptionMap();
- }
-
- /**
- * Sorts the list of available instance types by the number of CPU cores in a descending order.
- */
- private void sortAvailableInstancesByNumberOfCPUCores() {
-
- if (this.availableInstanceTypes.length < 2) {
- return;
- }
-
- for (int i = 1; i < this.availableInstanceTypes.length; i++) {
- final InstanceType it = this.availableInstanceTypes[i];
- int j = i;
- while (j > 0 && this.availableInstanceTypes[j - 1].getNumberOfCores() < it.getNumberOfCores()) {
- this.availableInstanceTypes[j] = this.availableInstanceTypes[j - 1];
- --j;
- }
- this.availableInstanceTypes[j] = it;
- }
- }
-
- @Override
- public void shutdown() {
- synchronized (this.lock) {
- if (this.shutdown) {
- return;
- }
-
- this.cleanupStaleMachines.cancel();
-
- Iterator<ClusterInstance> it = this.registeredHosts.values().iterator();
- while (it.hasNext()) {
- it.next().destroyProxies();
- }
- this.registeredHosts.clear();
-
- this.shutdown = true;
- }
- }
-
- @Override
- public InstanceType getDefaultInstanceType() {
- return this.defaultInstanceType;
- }
-
- @Override
- public InstanceType getInstanceTypeByName(String instanceTypeName) {
- synchronized (this.lock) {
- for (InstanceType it : availableInstanceTypes) {
- if (it.getIdentifier().equals(instanceTypeName)) {
- return it;
- }
- }
- }
-
- return null;
- }
-
-
- @Override
- public InstanceType getSuitableInstanceType(int minNumComputeUnits, int minNumCPUCores,
- int minMemorySize, int minDiskCapacity, int maxPricePerHour)
- {
- // the instances are sorted by price -> the first instance that
- // fulfills the requirements is suitable and the cheapest
-
- synchronized (this.lock) {
- for (InstanceType i : availableInstanceTypes) {
- if (i.getNumberOfComputeUnits() >= minNumComputeUnits && i.getNumberOfCores() >= minNumCPUCores
- && i.getMemorySize() >= minMemorySize && i.getDiskCapacity() >= minDiskCapacity
- && i.getPricePerHour() <= maxPricePerHour) {
- return i;
- }
- }
- }
- return null;
- }
-
-
- @Override
- public void releaseAllocatedResource(JobID jobID, Configuration conf,
- AllocatedResource allocatedResource) throws InstanceException
- {
- synchronized (this.lock) {
- // release the instance from the host
- final ClusterInstance clusterInstance = (ClusterInstance) allocatedResource.getInstance();
- final AllocatedSlice removedSlice = clusterInstance.removeAllocatedSlice(allocatedResource.getAllocationID());
-
- // remove the local association between instance and job
- final List<AllocatedSlice> slicesOfJob = this.slicesOfJobs.get(jobID);
- if (slicesOfJob == null) {
- LOG.error("Cannot find allocated slice to release allocated slice for job " + jobID);
- return;
- }
-
- slicesOfJob.remove(removedSlice);
-
- // Clean up
- if (slicesOfJob.isEmpty()) {
- this.slicesOfJobs.remove(jobID);
- }
-
- // Check pending requests
- checkPendingRequests();
- }
- }
-
- /**
- * Creates a new {@link ClusterInstance} object to manage instances that can
- * be executed on that host.
- *
- * @param instanceConnectionInfo
- * the connection information for the instance
- * @param hardwareDescription
- * the hardware description provided by the new instance
- * @return a new {@link ClusterInstance} object or <code>null</code> if the cluster instance could not be created
- */
- private ClusterInstance createNewHost(final InstanceConnectionInfo instanceConnectionInfo,
- final HardwareDescription hardwareDescription) {
-
- // Check if there is a user-defined instance type for this IP address
- InstanceType instanceType = this.ipToInstanceTypeMapping.get(instanceConnectionInfo.address());
- if (instanceType != null) {
- LOG.info("Found user-defined instance type for cluster instance with IP "
- + instanceConnectionInfo.address() + ": " + instanceType);
- } else {
- instanceType = matchHardwareDescriptionWithInstanceType(hardwareDescription);
- if (instanceType != null) {
- LOG.info("Hardware profile of cluster instance with IP " + instanceConnectionInfo.address()
- + " matches with instance type " + instanceType);
- } else {
- LOG.error("No matching instance type, cannot create cluster instance");
- return null;
- }
- }
-
- // Try to match new host with a stub host from the existing topology
- String instanceName = instanceConnectionInfo.hostname();
- NetworkNode parentNode = this.networkTopology.getRootNode();
- NetworkNode currentStubNode = null;
-
- // Try to match new host using the host name
- while (true) {
-
- currentStubNode = this.networkTopology.getNodeByName(instanceName);
- if (currentStubNode != null) {
- break;
- }
-
- final int pos = instanceName.lastIndexOf('.');
- if (pos == -1) {
- break;
- }
-
- /*
- * If host name is reported as FQDN, iteratively remove parts
- * of the domain name until a match occurs or no more dots
- * can be found in the host name.
- */
- instanceName = instanceName.substring(0, pos);
- }
-
- // Try to match the new host using the IP address
- if (currentStubNode == null) {
- instanceName = instanceConnectionInfo.address().toString();
- instanceName = instanceName.replaceAll("/", ""); // Remove any / characters
- currentStubNode = this.networkTopology.getNodeByName(instanceName);
- }
-
- if (currentStubNode != null) {
- /*
- * The instance name will be the same as the one of the stub node. That way
- * the stub node will be removed from the network topology and replaced by
- * the new node.
- */
- if (currentStubNode.getParentNode() != null) {
- parentNode = currentStubNode.getParentNode();
- }
- // Remove the stub node from the tree
- currentStubNode.remove();
- }
-
- LOG.info("Creating instance of type " + instanceType + " for " + instanceConnectionInfo + ", parent is "
- + parentNode.getName());
- final ClusterInstance host = new ClusterInstance(instanceConnectionInfo, instanceType, parentNode,
- this.networkTopology, hardwareDescription);
-
- return host;
- }
-
- /**
- * Attempts to match the hardware characteristics provided by the {@link HardwareDescription} object with one
- * of the instance types set in the configuration. The matching is pessimistic, i.e. the hardware characteristics of
- * the chosen instance type never exceed the actually reported characteristics from the hardware description.
- *
- * @param hardwareDescription
- * the hardware description as reported by the instance
- * @return the best matching instance type or <code>null</code> if no matching instance type can be found
- */
- private InstanceType matchHardwareDescriptionWithInstanceType(final HardwareDescription hardwareDescription) {
-
- // Assumes that the available instance types are ordered by number of CPU cores in descending order
- for (int i = 0; i < this.availableInstanceTypes.length; i++) {
-
- final InstanceType candidateInstanceType = this.availableInstanceTypes[i];
- // Check if number of CPU cores match
- if (candidateInstanceType.getNumberOfCores() > hardwareDescription.getNumberOfCPUCores()) {
- continue;
- }
-
- // Check if size of physical memory matches
- final int memoryInMB = (int) (hardwareDescription.getSizeOfPhysicalMemory() / (1024L * 1024L));
- if (candidateInstanceType.getMemorySize() > memoryInMB) {
- continue;
- }
-
- return candidateInstanceType;
- }
-
- LOG.error("Cannot find matching instance type for hardware description ("
- + hardwareDescription.getNumberOfCPUCores() + " cores, " + hardwareDescription.getSizeOfPhysicalMemory()
- + " bytes of memory)");
-
- return null;
- }
-
-
- @Override
- public void reportHeartBeat(InstanceConnectionInfo instanceConnectionInfo, HardwareDescription hardwareDescription) {
-
- synchronized (this.lock) {
- ClusterInstance host = registeredHosts.get(instanceConnectionInfo);
-
- // check whether we have discovered a new host
- if (host == null) {
- host = createNewHost(instanceConnectionInfo, hardwareDescription);
-
- if (host == null) {
- LOG.error("Could not create a new host object for incoming heart-beat. "
- + "Probably the configuration file is lacking some entries.");
- return;
- }
-
- this.registeredHosts.put(instanceConnectionInfo, host);
- LOG.info("New number of registered hosts is " + this.registeredHosts.size());
-
- // Update the list of instance type descriptions
- updateInstaceTypeDescriptionMap();
-
- // Check if a pending request can be fulfilled by the new host
- checkPendingRequests();
- }
-
- host.reportHeartBeat();
- }
- }
-
- /**
- * Checks if a pending request can be fulfilled.
- */
- private void checkPendingRequests() {
-
- final Iterator<Map.Entry<JobID, PendingRequestsMap>> it = this.pendingRequestsOfJob.entrySet().iterator();
- while (it.hasNext()) {
-
- final List<AllocatedResource> allocatedResources = new ArrayList<AllocatedResource>();
- final Map.Entry<JobID, PendingRequestsMap> entry = it.next();
- final JobID jobID = entry.getKey();
- final PendingRequestsMap pendingRequestsMap = entry.getValue();
- final Iterator<Map.Entry<InstanceType, Integer>> it2 = pendingRequestsMap.iterator();
- while (it2.hasNext()) {
-
- final Map.Entry<InstanceType, Integer> entry2 = it2.next();
- final InstanceType requestedInstanceType = entry2.getKey();
- int numberOfPendingInstances = entry2.getValue().intValue();
-
- // Consistency check
- if (numberOfPendingInstances <= 0) {
- LOG.error("Inconsistency: Job " + jobID + " has " + numberOfPendingInstances
- + " requests for instance type " + requestedInstanceType.getIdentifier());
- continue;
- }
-
- while (numberOfPendingInstances > 0) {
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Trying to allocate instance of type " + requestedInstanceType.getIdentifier());
- }
-
- // TODO: Introduce topology awareness here
- final AllocatedSlice slice = getSliceOfType(jobID, requestedInstanceType);
- if (slice == null) {
- break;
- } else {
-
- LOG.info("Allocated instance of type " + requestedInstanceType.getIdentifier()
- + " as a result of pending request for job " + jobID);
-
- // Decrease number of pending instances
- --numberOfPendingInstances;
- pendingRequestsMap.decreaseNumberOfPendingInstances(requestedInstanceType);
-
- List<AllocatedSlice> allocatedSlices = this.slicesOfJobs.get(jobID);
- if (allocatedSlices == null) {
- allocatedSlices = new ArrayList<AllocatedSlice>();
- this.slicesOfJobs.put(jobID, allocatedSlices);
- }
- allocatedSlices.add(slice);
-
- allocatedResources.add(new AllocatedResource(slice.getHostingInstance(), slice.getType(), slice
- .getAllocationID()));
- }
- }
- }
-
- if (!allocatedResources.isEmpty() && this.instanceListener != null) {
-
- final ClusterInstanceNotifier clusterInstanceNotifier = new ClusterInstanceNotifier(
- this.instanceListener, jobID, allocatedResources);
-
- clusterInstanceNotifier.start();
- }
- }
- }
-
- /**
- * Attempts to allocate a slice of the given type for the given job. The method first attempts to allocate this
- * slice by finding a physical host which exactly matches the given instance type. If this attempt failed, it tries
- * to allocate the slice by partitioning the resources of a more powerful host.
- *
- * @param jobID
- * the ID of the job the slice shall be allocated for
- * @param instanceType
- * the instance type of the requested slice
- * @return the allocated slice or <code>null</code> if no such slice could be allocated
- */
- private AllocatedSlice getSliceOfType(final JobID jobID, final InstanceType instanceType) {
-
- AllocatedSlice slice = null;
-
- // Try to match the instance type without slicing first
- for (final ClusterInstance host : this.registeredHosts.values()) {
- if (host.getType().equals(instanceType)) {
- slice = host.createSlice(instanceType, jobID);
- if (slice != null) {
- break;
- }
- }
- }
-
- // Use slicing now if necessary
- if (slice == null) {
-
- for (final ClusterInstance host : this.registeredHosts.values()) {
- slice = host.createSlice(instanceType, jobID);
- if (slice != null) {
- break;
- }
- }
- }
-
- return slice;
- }
-
-
- @Override
- public void requestInstance(JobID jobID, Configuration conf, InstanceRequestMap instanceRequestMap, List<String> splitAffinityList)
- throws InstanceException
- {
- final List<AllocatedSlice> newlyAllocatedSlicesOfJob = new ArrayList<AllocatedSlice>();
- final Map<InstanceType, Integer> pendingRequests = new HashMap<InstanceType, Integer>();
-
- synchronized(this.lock) {
- // Iterate over all instance types
- for (Iterator<Map.Entry<InstanceType, Integer>> it = instanceRequestMap.getMaximumIterator(); it.hasNext();) {
-
- // Iterate over all requested instances of a specific type
- final Map.Entry<InstanceType, Integer> entry = it.next();
- final int maximumNumberOfInstances = entry.getValue().intValue();
-
- for (int i = 0; i < maximumNumberOfInstances; i++) {
-
- LOG.info("Trying to allocate instance of type " + entry.getKey().getIdentifier());
-
- final AllocatedSlice slice = getSliceOfType(jobID, entry.getKey());
-
- if (slice == null) {
- if (i < instanceRequestMap.getMinimumNumberOfInstances(entry.getKey())) {
- // The request cannot be fulfilled, release the slices again and throw an exception
- for (final AllocatedSlice sliceToRelease : newlyAllocatedSlicesOfJob) {
- sliceToRelease.getHostingInstance().removeAllocatedSlice(sliceToRelease.getAllocationID());
- }
-
- // TODO: Remove previously allocated slices again
- throw new InstanceException("Could not find a suitable instance");
- } else {
-
- // Remaining instances are pending
- final int numberOfRemainingInstances = maximumNumberOfInstances - i;
- if (numberOfRemainingInstances > 0) {
-
- // Store the request for the missing instances
- Integer val = pendingRequests.get(entry.getKey());
- if (val == null) {
- val = Integer.valueOf(0);
- }
- val = Integer.valueOf(val.intValue() + numberOfRemainingInstances);
- pendingRequests.put(entry.getKey(), val);
- }
-
- break;
- }
- }
-
- newlyAllocatedSlicesOfJob.add(slice);
- }
- }
-
- // The request could be processed successfully, so update internal bookkeeping.
- List<AllocatedSlice> allAllocatedSlicesOfJob = this.slicesOfJobs.get(jobID);
- if (allAllocatedSlicesOfJob == null) {
- allAllocatedSlicesOfJob = new ArrayList<AllocatedSlice>();
- this.slicesOfJobs.put(jobID, allAllocatedSlicesOfJob);
- }
- allAllocatedSlicesOfJob.addAll(newlyAllocatedSlicesOfJob);
-
- PendingRequestsMap allPendingRequestsOfJob = this.pendingRequestsOfJob.get(jobID);
- if (allPendingRequestsOfJob == null) {
- allPendingRequestsOfJob = new PendingRequestsMap();
- this.pendingRequestsOfJob.put(jobID, allPendingRequestsOfJob);
- }
- for (final Iterator<Map.Entry<InstanceType, Integer>> it = pendingRequests.entrySet().iterator(); it.hasNext();) {
- final Map.Entry<InstanceType, Integer> entry = it.next();
-
- allPendingRequestsOfJob.addRequest(entry.getKey(), entry.getValue().intValue());
- }
-
- // Finally, create the list of allocated resources for the scheduler
- final List<AllocatedResource> allocatedResources = new ArrayList<AllocatedResource>();
- for (final AllocatedSlice slice : newlyAllocatedSlicesOfJob) {
- allocatedResources.add(new AllocatedResource(slice.getHostingInstance(), slice.getType(), slice
- .getAllocationID()));
- }
-
- if (this.instanceListener != null) {
- final ClusterInstanceNotifier clusterInstanceNotifier = new ClusterInstanceNotifier(
- this.instanceListener, jobID, allocatedResources);
- clusterInstanceNotifier.start();
- }
- }
- }
-
-
- @Override
- public NetworkTopology getNetworkTopology(JobID jobID) {
- return this.networkTopology;
- }
-
-
- @Override
- public void setInstanceListener(InstanceListener instanceListener) {
- synchronized (this.lock) {
- this.instanceListener = instanceListener;
- }
- }
-
-
- @Override
- public Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes() {
- Map<InstanceType, InstanceTypeDescription> copyToReturn = new SerializableHashMap<InstanceType, InstanceTypeDescription>();
- synchronized (this.lock) {
- copyToReturn.putAll(this.instanceTypeDescriptionMap);
- }
- return copyToReturn;
- }
-
- /**
- * Updates the list of instance type descriptions based on the currently registered hosts.
- */
- private void updateInstaceTypeDescriptionMap() {
-
- // this.registeredHosts.values().iterator()
- this.instanceTypeDescriptionMap.clear();
-
- final List<InstanceTypeDescription> instanceTypeDescriptionList = new ArrayList<InstanceTypeDescription>();
-
- // initialize array which stores the availability counter for each instance type
- final int[] numberOfInstances = new int[this.availableInstanceTypes.length];
- for (int i = 0; i < numberOfInstances.length; i++) {
- numberOfInstances[i] = 0;
- }
-
- // Shuffle through instance types
- for (int i = 0; i < this.availableInstanceTypes.length; i++) {
-
- final InstanceType currentInstanceType = this.availableInstanceTypes[i];
- int numberOfMatchingInstances = 0;
- int minNumberOfCPUCores = Integer.MAX_VALUE;
- long minSizeOfPhysicalMemory = Long.MAX_VALUE;
- long minSizeOfFreeMemory = Long.MAX_VALUE;
- final Iterator<ClusterInstance> it = this.registeredHosts.values().iterator();
- while (it.hasNext()) {
- final ClusterInstance clusterInstance = it.next();
- if (clusterInstance.getType().equals(currentInstanceType)) {
- ++numberOfMatchingInstances;
- final HardwareDescription hardwareDescription = clusterInstance.getHardwareDescription();
- minNumberOfCPUCores = Math.min(minNumberOfCPUCores, hardwareDescription.getNumberOfCPUCores());
- minSizeOfPhysicalMemory = Math.min(minSizeOfPhysicalMemory,
- hardwareDescription.getSizeOfPhysicalMemory());
- minSizeOfFreeMemory = Math.min(minSizeOfFreeMemory, hardwareDescription.getSizeOfFreeMemory());
- }
- }
-
- // Update number of instances
- int highestAccommodationNumber = -1;
- int highestAccommodationIndex = -1;
- for (int j = 0; j < this.availableInstanceTypes.length; j++) {
- final int accommodationNumber = canBeAccommodated(j, i);
- // LOG.debug(this.availableInstanceTypes[j].getIdentifier() + " fits into "
- // + this.availableInstanceTypes[i].getIdentifier() + " " + accommodationNumber + " times");
- if (accommodationNumber > 0) {
- numberOfInstances[j] += numberOfMatchingInstances * accommodationNumber;
- if (accommodationNumber > highestAccommodationNumber) {
- highestAccommodationNumber = accommodationNumber;
- highestAccommodationIndex = j;
- }
- }
- }
-
- // Calculate hardware description
- HardwareDescription pessimisticHardwareDescription = null;
- if (minNumberOfCPUCores < Integer.MAX_VALUE && minSizeOfPhysicalMemory < Long.MAX_VALUE
- && minSizeOfFreeMemory < Long.MAX_VALUE) {
-
- pessimisticHardwareDescription = HardwareDescriptionFactory.construct(minNumberOfCPUCores,
- minSizeOfPhysicalMemory, minSizeOfFreeMemory);
-
- } else {
-
- if (highestAccommodationIndex < i) { // Since highestAccommodationIndex smaller than my index, the
- // target instance must be more powerful
-
- final InstanceTypeDescription descriptionOfLargerInstanceType = instanceTypeDescriptionList
- .get(highestAccommodationIndex);
- if (descriptionOfLargerInstanceType.getHardwareDescription() != null) {
- final HardwareDescription hardwareDescriptionOfLargerInstanceType = descriptionOfLargerInstanceType
- .getHardwareDescription();
-
- final int numCores = hardwareDescriptionOfLargerInstanceType.getNumberOfCPUCores()
- / highestAccommodationNumber;
- final long physMem = hardwareDescriptionOfLargerInstanceType.getSizeOfPhysicalMemory()
- / highestAccommodationNumber;
- final long freeMem = hardwareDescriptionOfLargerInstanceType.getSizeOfFreeMemory()
- / highestAccommodationNumber;
-
- pessimisticHardwareDescription = HardwareDescriptionFactory.construct(numCores, physMem,
- freeMem);
- }
- }
- }
-
- instanceTypeDescriptionList.add(InstanceTypeDescriptionFactory.construct(currentInstanceType,
- pessimisticHardwareDescription, numberOfInstances[i]));
- }
-
- final Iterator<InstanceTypeDescription> it = instanceTypeDescriptionList.iterator();
- while (it.hasNext()) {
-
- final InstanceTypeDescription itd = it.next();
- this.instanceTypeDescriptionMap.put(itd.getInstanceType(), itd);
- }
- }
-
- /**
- * Calculates the instance accommodation matrix which stores how many times a particular instance type can be
- * accommodated inside another instance type based on the list of available instance types.
- *
- * @return the instance accommodation matrix
- */
- private int[][] calculateInstanceAccommodationMatrix() {
-
- if (this.availableInstanceTypes == null) {
- LOG.error("Cannot compute instance accommodation matrix: availableInstanceTypes is null");
- return null;
- }
-
- final int matrixSize = this.availableInstanceTypes.length;
- final int[][] am = new int[matrixSize][matrixSize];
-
- // Populate matrix
- for (int i = 0; i < matrixSize; i++) {
- for (int j = 0; j < matrixSize; j++) {
-
- if (i == j) {
- am[i][j] = 1;
- } else {
-
- final InstanceType sourceType = this.availableInstanceTypes[i];
- InstanceType targetType = this.availableInstanceTypes[j];
-
- // How many times can we accommodate source type into target type?
- final int cores = targetType.getNumberOfCores() / sourceType.getNumberOfCores();
- final int cu = targetType.getNumberOfComputeUnits() / sourceType.getNumberOfComputeUnits();
- final int mem = targetType.getMemorySize() / sourceType.getMemorySize();
- final int disk = targetType.getDiskCapacity() / sourceType.getDiskCapacity();
-
- am[i][j] = Math.min(cores, Math.min(cu, Math.min(mem, disk)));
- }
- }
- }
-
- return am;
- }
-
- /**
- * Returns how many times the instance type stored at index <code>sourceTypeIndex</code> can be accommodated inside
- * the instance type stored at index <code>targetTypeIndex</code> in the list of available instance types.
- *
- * @param sourceTypeIndex
- * the index of the source instance type in the list of available instance types
- * @param targetTypeIndex
- * the index of the target instance type in the list of available instance types
- * @return the number of times the source type instance can be accommodated inside the target instance
- */
- private int canBeAccommodated(int sourceTypeIndex, int targetTypeIndex) {
-
- if (sourceTypeIndex >= this.availableInstanceTypes.length
- || targetTypeIndex >= this.availableInstanceTypes.length) {
- LOG.error("Cannot determine number of instance accomodations: invalid index");
- return 0;
- }
-
- return this.instanceAccommodationMatrix[targetTypeIndex][sourceTypeIndex];
- }
-
-
- @Override
- public AbstractInstance getInstanceByName(String name) {
- if (name == null) {
- throw new IllegalArgumentException("Argument name must not be null");
- }
-
- synchronized (this.lock) {
- final Iterator<ClusterInstance> it = this.registeredHosts.values().iterator();
- while (it.hasNext()) {
- final AbstractInstance instance = it.next();
- if (name.equals(instance.getName())) {
- return instance;
- }
- }
- }
-
- return null;
- }
-
-
- @Override
- public void cancelPendingRequests(JobID jobID) {
- synchronized (this.lock) {
- this.pendingRequestsOfJob.remove(jobID);
- }
- }
-
- @Override
- public int getNumberOfTaskTrackers() {
- return this.registeredHosts.size();
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMap.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMap.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMap.java
deleted file mode 100644
index ddc90e9..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMap.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-
-import eu.stratosphere.nephele.instance.InstanceType;
-
-/**
- * This class represents a pending request, i.e. a request for a particular type and number of {@link AbstractInstance}
- * objects which could not be fulfilled yet.
- * <p>
- * This class is not thread-safe.
- *
- */
-public final class PendingRequestsMap {
-
- /**
- * The map storing the pending instance requests for the job this pending request object belongs to.
- */
- private final Map<InstanceType, Integer> pendingRequests = new HashMap<InstanceType, Integer>();
-
- /**
- * Checks if the job this object belongs to has pending instance requests.
- *
- * @return <code>true</code> if the job this object belongs to has pending instance requests, <code>false</code>
- * otherwise
- */
- boolean hasPendingRequests() {
-
- return !(this.pendingRequests.isEmpty());
- }
-
- /**
- * Adds a pending request for the given number of instances of the given type to this map.
- *
- * @param instanceType
- * the requested instance type
- * @param numberOfInstances
- * the requested number of instances of this type
- */
- void addRequest(final InstanceType instanceType, final int numberOfInstances) {
-
- Integer numberOfRemainingInstances = this.pendingRequests.get(instanceType);
- if (numberOfRemainingInstances == null) {
- numberOfRemainingInstances = Integer.valueOf(numberOfInstances);
- } else {
- numberOfRemainingInstances = Integer.valueOf(numberOfRemainingInstances.intValue() + numberOfInstances);
- }
-
- this.pendingRequests.put(instanceType, numberOfRemainingInstances);
- }
-
- /**
- * Returns an iterator for the pending requests encapsulated in this map.
- *
- * @return an iterator for the pending requests encapsulated in this map
- */
- Iterator<Map.Entry<InstanceType, Integer>> iterator() {
-
- return this.pendingRequests.entrySet().iterator();
- }
-
- /**
- * Decreases the number of remaining instances to request of the given type.
- *
- * @param instanceType
- * the instance type for which the number of remaining instances shall be decreased
- */
- void decreaseNumberOfPendingInstances(final InstanceType instanceType) {
-
- Integer numberOfRemainingInstances = this.pendingRequests.get(instanceType);
- if (numberOfRemainingInstances == null) {
- return;
- }
-
- numberOfRemainingInstances = Integer.valueOf(numberOfRemainingInstances.intValue() - 1);
- if (numberOfRemainingInstances.intValue() == 0) {
- this.pendingRequests.remove(instanceType);
- } else {
- this.pendingRequests.put(instanceType, numberOfRemainingInstances);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstance.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstance.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstance.java
deleted file mode 100644
index 795889e..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstance.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.local;
-
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.topology.NetworkNode;
-import eu.stratosphere.nephele.topology.NetworkTopology;
-
-public class LocalInstance extends AbstractInstance {
-
- public LocalInstance(InstanceType instanceType, InstanceConnectionInfo instanceConnectionInfo,
- NetworkNode parentNode, NetworkTopology networkTopology, HardwareDescription hardwareDescription) {
- super(instanceType, instanceConnectionInfo, parentNode, networkTopology, hardwareDescription);
- }
-
-
- @Override
- public String toString() {
-
- return this.getInstanceConnectionInfo().toString();
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceManager.java
deleted file mode 100644
index e888b3f..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceManager.java
+++ /dev/null
@@ -1,418 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.local;
-
-import java.io.File;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Iterator;
-
-import eu.stratosphere.nephele.ExecutionMode;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import eu.stratosphere.configuration.ConfigConstants;
-import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.configuration.GlobalConfiguration;
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.AllocationID;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.HardwareDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.instance.InstanceTypeDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.nephele.taskmanager.TaskManager;
-import eu.stratosphere.nephele.topology.NetworkTopology;
-import eu.stratosphere.nephele.util.SerializableHashMap;
-
-/**
- * The local instance manager is designed to manage instance allocation/deallocation for a single-node setup. It spans a
- * task manager which is executed within the same process as the job manager. Moreover, it determines the hardware
- * characteristics of the machine it runs on and generates a default instance type with the identifier "default". If
- * desired this default instance type can also be overwritten.
- */
-public class LocalInstanceManager implements InstanceManager {
-
- /**
- * The log object used to report events and errors.
- */
- private static final Log LOG = LogFactory.getLog(LocalInstanceManager.class);
-
- /**
- * The key for the configuration parameter defining the instance type to be used by the local instance manager. If
- * the parameter is not set, a default instance type with the identifier "default" is generated from the machine's
- * hardware characteristics.
- */
-
- private static final String LOCALINSTANCE_TYPE_KEY = "instancemanager.local.type";
-
- private static final int SLEEP_TIME = 50;
-
- private static final int START_STOP_TIMEOUT = 2000;
-
-
- /**
- * The instance listener registered with this instance manager.
- */
- private InstanceListener instanceListener;
-
- /**
- * The default instance type which is either generated from the hardware characteristics of the machine the local
- * instance manager runs on or read from the configuration.
- */
- private final InstanceType defaultInstanceType;
-
- /**
- * A synchronization object to protect critical sections.
- */
- private final Object synchronizationObject = new Object();
-
- /**
- * Stores which task manager is currently occupied by a job.
- */
- private Map<LocalInstance, AllocatedResource> allocatedResources = new HashMap<LocalInstance, AllocatedResource>();
-
- /**
- * The local instances encapsulating the task managers
- */
- private Map<InstanceConnectionInfo, LocalInstance> localInstances = new HashMap<InstanceConnectionInfo,
- LocalInstance>();
-
- /**
- * The threads running the local task managers.
- */
- private final List<TaskManager> taskManagers = new ArrayList<TaskManager>();
-
- /**
- * The network topology the local instance is part of.
- */
- private final NetworkTopology networkTopology;
-
- /**
- * The map of instance type descriptions.
- */
- private final Map<InstanceType, InstanceTypeDescription> instanceTypeDescriptionMap;
-
- /**
- * Number of task managers
- */
- private final int numTaskManagers;
-
-
-
-
- /**
- * Constructs a new local instance manager.
- *
- */
- public LocalInstanceManager() throws Exception {
-
- final Configuration config = GlobalConfiguration.getConfiguration();
-
- // get the default instance type
- InstanceType type = null;
- final String descr = config.getString(LOCALINSTANCE_TYPE_KEY, null);
- if (descr != null) {
- LOG.info("Attempting to parse default instance type from string " + descr);
- type = InstanceTypeFactory.constructFromDescription(descr);
- if (type == null) {
- LOG.warn("Unable to parse default instance type from configuration, using hardware profile instead");
- }
- }
-
- this.defaultInstanceType = (type != null) ? type : createDefaultInstanceType();
-
- LOG.info("Default instance type is " + this.defaultInstanceType.getIdentifier());
-
- this.networkTopology = NetworkTopology.createEmptyTopology();
-
- this.instanceTypeDescriptionMap = new SerializableHashMap<InstanceType, InstanceTypeDescription>();
-
- numTaskManagers = GlobalConfiguration.getInteger(ConfigConstants
- .LOCAL_INSTANCE_MANAGER_NUMBER_TASK_MANAGER, 1);
-
- ExecutionMode executionMode = (numTaskManagers > 1) ? ExecutionMode.CLUSTER : ExecutionMode.LOCAL;
-
- for(int i=0; i< numTaskManagers; i++){
-
- Configuration tm = new Configuration();
- int ipcPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_IPC_PORT_KEY,
- ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT);
- int dataPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY,
- ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT);
-
- tm.setInteger(ConfigConstants.TASK_MANAGER_IPC_PORT_KEY, ipcPort + i);
- tm.setInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, dataPort + i);
-
- GlobalConfiguration.includeConfiguration(tm);
-
- TaskManager t = new TaskManager(executionMode);
- taskManagers.add(t);
- }
- }
-
-
- @Override
- public InstanceType getDefaultInstanceType() {
- return this.defaultInstanceType;
- }
-
-
- @Override
- public InstanceType getInstanceTypeByName(String instanceTypeName) {
- if (this.defaultInstanceType.getIdentifier().equals(instanceTypeName)) {
- return this.defaultInstanceType;
- }
-
- return null;
- }
-
-
- @Override
- public InstanceType getSuitableInstanceType(int minNumComputeUnits, int minNumCPUCores,
- int minMemorySize, int minDiskCapacity, int maxPricePerHour) {
-
- if (minNumComputeUnits > this.defaultInstanceType.getNumberOfComputeUnits()) {
- return null;
- }
-
- if (minNumCPUCores > this.defaultInstanceType.getNumberOfCores()) {
- return null;
- }
-
- if (minMemorySize > this.defaultInstanceType.getMemorySize()) {
- return null;
- }
-
- if (minDiskCapacity > this.defaultInstanceType.getDiskCapacity()) {
- return null;
- }
-
- if (maxPricePerHour > this.defaultInstanceType.getPricePerHour()) {
- return null;
- }
-
- return this.defaultInstanceType;
- }
-
-
- @Override
- public void releaseAllocatedResource(final JobID jobID, final Configuration conf,
- final AllocatedResource allocatedResource)
- throws InstanceException {
- LocalInstance instance = (LocalInstance) allocatedResource.getInstance();
-
- synchronized (this.synchronizationObject) {
- if(allocatedResources.containsKey(allocatedResource.getInstance())){
- if(allocatedResources.get(instance).equals(allocatedResource)){
- allocatedResources.remove(instance);
- return;
- }
- }
- throw new InstanceException("Resource with allocation ID " + allocatedResource.getAllocationID()
- + " has not been allocated to job with ID " + jobID
- + " according to the local instance manager's internal bookkeeping");
-
- }
- }
-
-
- @Override
- public void reportHeartBeat(final InstanceConnectionInfo instanceConnectionInfo,
- final HardwareDescription hardwareDescription) {
-
- synchronized (this.synchronizationObject) {
- if(!localInstances.containsKey(instanceConnectionInfo)){
- LocalInstance localInstance = new LocalInstance(this.defaultInstanceType, instanceConnectionInfo,
- this.networkTopology.getRootNode(), this.networkTopology, hardwareDescription);
- localInstances.put(instanceConnectionInfo, localInstance);
-
- this.instanceTypeDescriptionMap.put(this.defaultInstanceType, InstanceTypeDescriptionFactory
- .construct(this.defaultInstanceType, hardwareDescription, localInstances.size()));
- }
- }
- }
-
-
- @Override
- public void shutdown() {
- // Stop the task managers
- for(TaskManager t : taskManagers){
- t.shutdown();
- }
-
- boolean areAllTaskManagerShutdown = false;
- int timeout = START_STOP_TIMEOUT * this.taskManagers.size();
-
- for(int sleep = 0; sleep < timeout; sleep += SLEEP_TIME){
- areAllTaskManagerShutdown = true;
-
- for(TaskManager t: taskManagers){
- if(!t.isShutDown()){
- areAllTaskManagerShutdown = false;
- break;
- }
- }
-
- if(areAllTaskManagerShutdown){
- break;
- }
-
- try {
- Thread.sleep(SLEEP_TIME);
- }catch(InterruptedException e){
- break;
- }
- }
-
- if(!areAllTaskManagerShutdown){
- throw new RuntimeException(String.format("TaskManager shut down timed out (%d ms).", timeout));
- }
-
- instanceTypeDescriptionMap.clear();
-
- synchronized(this.synchronizationObject){
- for(LocalInstance instance: this.localInstances.values()){
- instance.destroyProxies();
- }
-
- localInstances.clear();
- }
- }
-
-
- @Override
- public NetworkTopology getNetworkTopology(final JobID jobID) {
- return this.networkTopology;
- }
-
-
- @Override
- public void setInstanceListener(final InstanceListener instanceListener) {
- this.instanceListener = instanceListener;
- }
-
- /**
- * Creates a default instance type based on the hardware characteristics of the machine that calls this method. The
- * default instance type contains the machine's number of CPU cores and size of physical memory. The disc capacity
- * is calculated from the free space in the directory for temporary files.
- *
- * @return the default instance type used for the local machine
- */
- public static final InstanceType createDefaultInstanceType() {
- final HardwareDescription hardwareDescription = HardwareDescriptionFactory.extractFromSystem();
-
- int diskCapacityInGB = 0;
- final String[] tempDirs = GlobalConfiguration.getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY,
- ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH).split(File.pathSeparator);
-
- for (final String tempDir : tempDirs) {
- if (tempDir != null) {
- File f = new File(tempDir);
- diskCapacityInGB = Math.max(diskCapacityInGB, (int) (f.getFreeSpace() / (1024L * 1024L * 1024L)));
- }
- }
-
- final int physicalMemory = (int) (hardwareDescription.getSizeOfPhysicalMemory() / (1024L * 1024L));
-
- return InstanceTypeFactory.construct("default", hardwareDescription.getNumberOfCPUCores(),
- hardwareDescription.getNumberOfCPUCores(), physicalMemory, diskCapacityInGB, 0);
- }
-
-
- @Override
- public Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes() {
- return this.instanceTypeDescriptionMap;
- }
-
- @Override
- public void requestInstance(final JobID jobID, final Configuration conf,
- final InstanceRequestMap instanceRequestMap,
- final List<String> splitAffinityList) throws InstanceException {
-
- // TODO: This can be implemented way simpler...
- // Iterate over all instance types
- final Iterator<Map.Entry<InstanceType, Integer>> it = instanceRequestMap.getMinimumIterator();
- final List<AllocatedResource> assignedResources = new ArrayList<AllocatedResource>();
- boolean assignmentSuccessful = true;
-
- while (it.hasNext()) {
-
- // Iterate over all requested instances of a specific type
- final Map.Entry<InstanceType, Integer> entry = it.next();
-
- for (int i = 0; i < entry.getValue().intValue(); i++) {
-
- synchronized (this.synchronizationObject) {
- boolean instanceFound = false;
- for(LocalInstance instance: localInstances.values()){
- if(!allocatedResources.containsKey(instance)){
- AllocatedResource assignedResource = new AllocatedResource(instance, entry.getKey(),
- new AllocationID());
- allocatedResources.put(instance, assignedResource);
- assignedResources.add(assignedResource);
- instanceFound = true;
- break;
- }
- }
-
- assignmentSuccessful &= instanceFound;
- }
- }
- }
-
- if(assignmentSuccessful){
- new LocalInstanceNotifier(this.instanceListener, jobID, assignedResources).start();
- }else{
- throw new InstanceException("Could not satisfy instance request.");
- }
- }
-
- @Override
- public AbstractInstance getInstanceByName(final String name) {
- if (name == null) {
- throw new IllegalArgumentException("Argument name must not be null");
- }
-
- synchronized (this.synchronizationObject) {
- for(LocalInstance instance :localInstances.values()){
- if(name.equals(instance.getName())){
- return instance;
- }
- }
- }
- return null;
- }
-
-
- @Override
- public void cancelPendingRequests(final JobID jobID) {
- // The local instance manager does not support pending requests, so nothing to do here
- }
-
- @Override
- public int getNumberOfTaskTrackers() {
- return localInstances.size();
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceNotifier.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceNotifier.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceNotifier.java
deleted file mode 100644
index 52da691..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/local/LocalInstanceNotifier.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.local;
-
-import java.util.List;
-
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.jobgraph.JobID;
-
-/**
- * This class is an auxiliary class to send the notification
- * about the availability of an {@link AllocatedResource} to the given {@link InstanceListener} object. The notification
- * must be sent from
- * a separate thread, otherwise the atomic operation of requesting an instance
- * for a vertex and switching to the state ASSINING could not be guaranteed.
- * This class is thread-safe.
- *
- */
-public class LocalInstanceNotifier extends Thread {
-
- /**
- * The {@link InstanceListener} object to send the notification to.
- */
- private final InstanceListener instanceListener;
-
- /**
- * The ID of the job the new instance belongs to.
- */
- private final JobID jobID;
-
- /**
- * The resources allocated for the job.
- */
- private final List<AllocatedResource> allocatedResources;
-
- /**
- * Constructs a new instance notifier object.
- *
- * @param instanceListener
- * the listener object to send the notification to
- * @param jobID
- * the ID of the job the newly allocated resources belongs to
- * @param allocatedResource
- * the resources allocated for the job
- */
- public LocalInstanceNotifier(final InstanceListener instanceListener, final JobID jobID, final List<AllocatedResource> allocatedResources) {
- this.instanceListener = instanceListener;
- this.jobID = jobID;
- this.allocatedResources = allocatedResources;
- }
-
-
- @Override
- public void run() {
-
- this.instanceListener.resourcesAllocated(this.jobID, this.allocatedResources);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
index b4c51f2..d64c622 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
@@ -67,16 +67,6 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
private int numberOfSubtasks = -1;
/**
- * The type of instance to be assigned to this task at runtime.
- */
- private String instanceType = null;
-
- /**
- * Number of subtasks to share the same instance at runtime.
- */
- private int numberOfSubtasksPerInstance = -1;
-
- /**
* Number of retries in case of an error before the task represented by this vertex is considered as failed.
*/
private int numberOfExecutionRetries = -1;
@@ -150,8 +140,6 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
* the vertex this vertex should connect to
* @param channelType
* the channel type the two vertices should be connected by at runtime
- * @param compressionLevel
- * the compression level the corresponding channel should have at runtime
* @throws JobGraphDefinitionException
* thrown if the given vertex cannot be connected to <code>vertex</code> in the requested manner
*/
@@ -166,8 +154,8 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
* the vertex this vertex should connect to
* @param channelType
* the channel type the two vertices should be connected by at runtime
- * @param compressionLevel
- * the compression level the corresponding channel should have at runtime
+ * @param distributionPattern
+ * the distribution pattern between the two job vertices
* @throws JobGraphDefinitionException
* thrown if the given vertex cannot be connected to <code>vertex</code> in the requested manner
*/
@@ -184,14 +172,14 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
* the vertex this vertex should connect to
* @param channelType
* the channel type the two vertices should be connected by at runtime
- * @param compressionLevel
- * the compression level the corresponding channel should have at runtime
* @param indexOfOutputGate
* index of the producing task's output gate to be used, <code>-1</code> will determine the next free index
* number
* @param indexOfInputGate
* index of the consuming task's input gate to be used, <code>-1</code> will determine the next free index
* number
+ * @param distributionPattern
+ * the distribution pattern between the two job vertices
* @throws JobGraphDefinitionException
* thrown if the given vertex cannot be connected to <code>vertex</code> in the requested manner
*/
@@ -274,12 +262,12 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
* the job vertex to connect to
* @param channelType
* the channel type the two vertices should be connected by at runtime
- * @param compressionLevel
- * the compression level the corresponding channel should have at runtime
* @param indexOfOutputGate
* index of the producing task's output gate to be used
* @param indexOfInputGate
* index of the consuming task's input gate to be used
+ * @param distributionPattern
+ * the distribution pattern between the two job vertices
*/
private void connectBacklink(final AbstractJobVertex vertex, final ChannelType channelType,
final int indexOfOutputGate, final int indexOfInputGate,
@@ -364,32 +352,6 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
}
/**
- * Returns the index of the edge which is used to connect the given job vertex to this job vertex.
- *
- * @param jv
- * the connected job vertex
- * @return the index of the edge which is used to connect the given job vertex to this job vertex or -1 if the given
- * vertex is not connected to this job vertex
- */
- /*
- * public int getBackwardConnectionIndex(AbstractJobVertex jv) {
- * if(jv == null) {
- * return -1;
- * }
- * final Iterator<JobEdge> it = this.backwardEdges.iterator();
- * int i = 0;
- * while(it.hasNext()) {
- * final JobEdge edge = it.next();
- * if(edge.getConnectedVertex() == jv) {
- * return i;
- * }
- * i++;
- * }
- * return -1;
- * }
- */
-
- /**
* Returns the ID of this job vertex.
*
* @return the ID of this job vertex
@@ -407,15 +369,9 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
throw new IOException("jobGraph is null, cannot deserialize");
}
- // Read instance type
- this.instanceType = StringRecord.readString(in);
-
// Read number of subtasks
this.numberOfSubtasks = in.readInt();
- // Read number of subtasks per instance
- this.numberOfSubtasksPerInstance = in.readInt();
-
// Number of execution retries
this.numberOfExecutionRetries = in.readInt();
@@ -489,15 +445,9 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
@Override
public void write(final DataOutput out) throws IOException {
- // Instance type
- StringRecord.writeString(out, this.instanceType);
-
// Number of subtasks
out.writeInt(this.numberOfSubtasks);
- // Number of subtasks per instance
- out.writeInt(this.numberOfSubtasksPerInstance);
-
// Number of execution retries
out.writeInt(this.numberOfExecutionRetries);
@@ -595,44 +545,6 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
}
/**
- * Sets the instance type the task this vertex represents should run on.
- *
- * @param instanceType
- * the instance type the task this vertex represents should run on
- */
- public void setInstanceType(final String instanceType) {
- this.instanceType = instanceType;
- }
-
- /**
- * Returns the instance type the task this vertex represents should run on.
- *
- * @return the instance type the task this vertex represents should run on, <code>null</code> if unspecified
- */
- public String getInstanceType() {
- return this.instanceType;
- }
-
- /**
- * Sets the number of subtasks that should be assigned to the same instance.
- *
- * @param numberOfSubtasksPerInstance
- * the number of subtasks that should be assigned to the same instance
- */
- public void setNumberOfSubtasksPerInstance(final int numberOfSubtasksPerInstance) {
- this.numberOfSubtasksPerInstance = numberOfSubtasksPerInstance;
- }
-
- /**
- * Returns the number of subtasks that should be assigned to the same instance, <code>-1</code> if undefined.
- *
- * @return the number of subtasks that should be assigned to the same instance, <code>-1</code> if undefined
- */
- public int getNumberOfSubtasksPerInstance() {
- return this.numberOfSubtasksPerInstance;
- }
-
- /**
* Sets the vertex this vertex should share its instances with at runtime.
*
* @param vertex
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/DeploymentManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/DeploymentManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/DeploymentManager.java
index bf017ce..b043ecd 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/DeploymentManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/DeploymentManager.java
@@ -16,19 +16,19 @@ package eu.stratosphere.nephele.jobmanager;
import java.util.List;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.instance.AbstractInstance;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.nephele.jobgraph.JobID;
/**
* A deployment manager is responsible for deploying a list of {@link ExecutionVertex} objects the given
- * {@link AbstractInstance}. It is called by a {@link AbstractScheduler} implementation whenever at least one
+ * {@link eu.stratosphere.nephele.instance.Instance}. It is called by a {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} implementation whenever at least one
* {@link ExecutionVertex} has become ready to be executed.
*
*/
public interface DeploymentManager {
/**
- * Deploys the list of vertices on the given {@link AbstractInstance}.
+ * Deploys the list of vertices on the given {@link eu.stratosphere.nephele.instance.Instance}.
*
* @param jobID
* the ID of the job the vertices to be deployed belong to
@@ -37,5 +37,5 @@ public interface DeploymentManager {
* @param verticesToBeDeployed
* the list of vertices to be deployed
*/
- void deploy(JobID jobID, AbstractInstance instance, List<ExecutionVertex> verticesToBeDeployed);
+ void deploy(JobID jobID, Instance instance, List<ExecutionVertex> verticesToBeDeployed);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/EventCollector.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/EventCollector.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/EventCollector.java
index a6f9cfe..37f9a43 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/EventCollector.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/EventCollector.java
@@ -38,7 +38,7 @@ import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
import eu.stratosphere.nephele.executiongraph.JobStatusListener;
import eu.stratosphere.nephele.executiongraph.ManagementGraphFactory;
import eu.stratosphere.nephele.executiongraph.VertexAssignmentListener;
-import eu.stratosphere.nephele.instance.AbstractInstance;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.nephele.instance.AllocatedResource;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.jobgraph.JobStatus;
@@ -266,10 +266,10 @@ public final class EventCollector extends TimerTask implements ProfilingListener
final ManagementVertexID managementVertexID = id.toManagementVertexID();
final long timestamp = System.currentTimeMillis();
- final AbstractInstance instance = newAllocatedResource.getInstance();
+ final Instance instance = newAllocatedResource.getInstance();
VertexAssignmentEvent event;
if (instance == null) {
- event = new VertexAssignmentEvent(timestamp, managementVertexID, "null", "null");
+ event = new VertexAssignmentEvent(timestamp, managementVertexID, "null");
} else {
String instanceName = null;
@@ -279,8 +279,7 @@ public final class EventCollector extends TimerTask implements ProfilingListener
instanceName = instance.toString();
}
- event = new VertexAssignmentEvent(timestamp, managementVertexID, instanceName, instance.getType()
- .getIdentifier());
+ event = new VertexAssignmentEvent(timestamp, managementVertexID, instanceName);
}
this.eventCollector.updateManagementGraph(jobID, event);
@@ -609,7 +608,6 @@ public final class EventCollector extends TimerTask implements ProfilingListener
}
vertex.setInstanceName(vertexAssignmentEvent.getInstanceName());
- vertex.setInstanceType(vertexAssignmentEvent.getInstanceType());
}
}
[02/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/TransitiveClosureNaiveITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/TransitiveClosureNaiveITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/TransitiveClosureNaiveITCase.java
index feb5f34..95d75fb 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/TransitiveClosureNaiveITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/TransitiveClosureNaiveITCase.java
@@ -39,7 +39,7 @@ public class TransitiveClosureNaiveITCase extends RecordAPITestBase {
protected Plan getTestJob() {
TransitiveClosureNaive transitiveClosureNaive = new TransitiveClosureNaive();
// "2" is the number of iterations here
- return transitiveClosureNaive.getScalaPlan(4, 2, verticesPath, edgesPath, resultPath);
+ return transitiveClosureNaive.getScalaPlan(DOP, 2, verticesPath, edgesPath, resultPath);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WebLogAnalysisITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WebLogAnalysisITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WebLogAnalysisITCase.java
index 2ddef0c..67c5ce1 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WebLogAnalysisITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WebLogAnalysisITCase.java
@@ -21,6 +21,6 @@ public class WebLogAnalysisITCase extends eu.stratosphere.test.recordJobTests.We
@Override
protected Plan getTestJob() {
WebLogAnalysis webLogAnalysis = new WebLogAnalysis();
- return webLogAnalysis.getScalaPlan(4, docsPath, ranksPath, visitsPath, resultPath);
+ return webLogAnalysis.getScalaPlan(DOP, docsPath, ranksPath, visitsPath, resultPath);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountITCase.java
index 205828f..42ee31a 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountITCase.java
@@ -22,6 +22,6 @@ public class WordCountITCase extends eu.stratosphere.test.recordJobTests.WordCou
@Override
protected Plan getTestJob() {
WordCount wc = new WordCount();
- return wc.getScalaPlan(4, textPath, resultPath);
+ return wc.getScalaPlan(DOP, textPath, resultPath);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountPactValueITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountPactValueITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountPactValueITCase.java
index 4627d48..44b37f6 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountPactValueITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountPactValueITCase.java
@@ -22,6 +22,6 @@ public class WordCountPactValueITCase extends eu.stratosphere.test.recordJobTest
@Override
protected Plan getTestJob() {
WordCountWithUserDefinedType wc = new WordCountWithUserDefinedType();
- return wc.getScalaPlan(4, textPath, resultPath);
+ return wc.getScalaPlan(DOP, textPath, resultPath);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountWithCountFunctionITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountWithCountFunctionITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountWithCountFunctionITCase.java
index 12082ac..a8eba29 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountWithCountFunctionITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleScalaPrograms/WordCountWithCountFunctionITCase.java
@@ -20,6 +20,6 @@ public class WordCountWithCountFunctionITCase extends eu.stratosphere.test.recor
@Override
protected Plan getTestJob() {
- return new WordCountWithCount().getScalaPlan(4, textPath, resultPath);
+ return new WordCountWithCount().getScalaPlan(DOP, textPath, resultPath);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/failingPrograms/TaskFailureITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/failingPrograms/TaskFailureITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/failingPrograms/TaskFailureITCase.java
index 3f4791e..4f5d38d 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/failingPrograms/TaskFailureITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/failingPrograms/TaskFailureITCase.java
@@ -36,6 +36,8 @@ import eu.stratosphere.util.Collector;
*/
public class TaskFailureITCase extends FailingTestBase {
+ private static final int DOP = 4;
+
// input for map tasks
private static final String MAP_IN = "1 1\n2 2\n2 8\n4 4\n4 4\n6 6\n7 7\n8 8\n" +
"1 1\n2 2\n2 2\n4 4\n4 4\n6 3\n5 9\n8 8\n" +
@@ -47,6 +49,10 @@ public class TaskFailureITCase extends FailingTestBase {
private String inputPath;
private String resultPath;
+
+ public TaskFailureITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
protected void preSubmit() throws Exception {
@@ -73,7 +79,7 @@ public class TaskFailureITCase extends FailingTestBase {
// generate plan
Plan plan = new Plan(output);
- plan.setDefaultParallelism(4);
+ plan.setDefaultParallelism(DOP);
// optimize and compile plan
PactCompiler pc = new PactCompiler(new DataStatistics());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/CoGroupConnectedComponentsITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/CoGroupConnectedComponentsITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/CoGroupConnectedComponentsITCase.java
index c937435..f8d82ae 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/CoGroupConnectedComponentsITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/CoGroupConnectedComponentsITCase.java
@@ -50,6 +50,10 @@ public class CoGroupConnectedComponentsITCase extends RecordAPITestBase {
protected String verticesPath;
protected String edgesPath;
protected String resultPath;
+
+ public CoGroupConnectedComponentsITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
@@ -61,7 +65,7 @@ public class CoGroupConnectedComponentsITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
- return getPlan(4, verticesPath, edgesPath, resultPath, 100);
+ return getPlan(DOP, verticesPath, edgesPath, resultPath, 100);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsITCase.java
index 9c88bb5..53feae6 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsITCase.java
@@ -33,6 +33,10 @@ public class ConnectedComponentsITCase extends RecordAPITestBase {
protected String verticesPath;
protected String edgesPath;
protected String resultPath;
+
+ public ConnectedComponentsITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
@@ -45,7 +49,7 @@ public class ConnectedComponentsITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
WorksetConnectedComponents cc = new WorksetConnectedComponents();
- return cc.getPlan("4", verticesPath, edgesPath, resultPath, "100");
+ return cc.getPlan(new Integer(DOP).toString(), verticesPath, edgesPath, resultPath, "100");
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithDeferredUpdateITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithDeferredUpdateITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithDeferredUpdateITCase.java
index 8de877c..eb2eec0 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithDeferredUpdateITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithDeferredUpdateITCase.java
@@ -59,6 +59,7 @@ public class ConnectedComponentsWithDeferredUpdateITCase extends RecordAPITestBa
public ConnectedComponentsWithDeferredUpdateITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -71,7 +72,7 @@ public class ConnectedComponentsWithDeferredUpdateITCase extends RecordAPITestBa
@Override
protected Plan getTestJob() {
boolean extraMapper = config.getBoolean("ExtraMapper", false);
- return getPlan(4, verticesPath, edgesPath, resultPath, 100, extraMapper);
+ return getPlan(DOP, verticesPath, edgesPath, resultPath, 100, extraMapper);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithSolutionSetFirstITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithSolutionSetFirstITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithSolutionSetFirstITCase.java
index 5c696a2..f040f06 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithSolutionSetFirstITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/ConnectedComponentsWithSolutionSetFirstITCase.java
@@ -51,6 +51,10 @@ public class ConnectedComponentsWithSolutionSetFirstITCase extends RecordAPITest
protected String verticesPath;
protected String edgesPath;
protected String resultPath;
+
+ public ConnectedComponentsWithSolutionSetFirstITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
@@ -62,7 +66,8 @@ public class ConnectedComponentsWithSolutionSetFirstITCase extends RecordAPITest
@Override
protected Plan getTestJob() {
- return getPlanForWorksetConnectedComponentsWithSolutionSetAsFirstInput(4, verticesPath, edgesPath, resultPath, 100);
+ return getPlanForWorksetConnectedComponentsWithSolutionSetAsFirstInput(DOP, verticesPath, edgesPath,
+ resultPath, 100);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DanglingPageRankITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DanglingPageRankITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DanglingPageRankITCase.java
index e84f21e..5390eed 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DanglingPageRankITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DanglingPageRankITCase.java
@@ -35,6 +35,7 @@ public class DanglingPageRankITCase extends RecordAPITestBase {
public DanglingPageRankITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@@ -63,7 +64,7 @@ public class DanglingPageRankITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config1 = new Configuration();
- config1.setInteger("PageRankITCase#NoSubtasks", 4);
+ config1.setInteger("PageRankITCase#NoSubtasks", DOP);
config1.setString("PageRankITCase#NumIterations", "25");
return toParameterList(config1);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DeltaPageRankITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DeltaPageRankITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DeltaPageRankITCase.java
index 8f06929..1eb81ed 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DeltaPageRankITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DeltaPageRankITCase.java
@@ -36,6 +36,7 @@ public class DeltaPageRankITCase extends RecordAPITestBase {
public DeltaPageRankITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -66,7 +67,7 @@ public class DeltaPageRankITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config1 = new Configuration();
- config1.setInteger("NumSubtasks", 4);
+ config1.setInteger("NumSubtasks", DOP);
config1.setInteger("NumIterations", 3);
return toParameterList(config1);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DependencyConnectedComponentsITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DependencyConnectedComponentsITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DependencyConnectedComponentsITCase.java
index deda551..e1339e3 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DependencyConnectedComponentsITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/DependencyConnectedComponentsITCase.java
@@ -28,7 +28,6 @@ import eu.stratosphere.api.java.tuple.Tuple2;
import eu.stratosphere.test.util.JavaProgramTestBase;
import eu.stratosphere.util.Collector;
-
/**
*
* Iterative Connected Components test case which recomputes only the elements
@@ -46,6 +45,10 @@ public class DependencyConnectedComponentsITCase extends JavaProgramTestBase {
protected static List<Tuple2<Long, Long>> edgesInput = new ArrayList<Tuple2<Long, Long>>();
private String resultPath;
private String expectedResult;
+
+ public DependencyConnectedComponentsITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTerminationTail.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTerminationTail.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTerminationTail.java
index 3c38263..50c1970 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTerminationTail.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTerminationTail.java
@@ -40,6 +40,10 @@ public class IterationTerminationWithTerminationTail extends RecordAPITestBase {
protected String dataPath;
protected String resultPath;
+
+ public IterationTerminationWithTerminationTail(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
protected void preSubmit() throws Exception {
@@ -54,7 +58,7 @@ public class IterationTerminationWithTerminationTail extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
- return getTestPlanPlan(4, dataPath, resultPath);
+ return getTestPlanPlan(DOP, dataPath, resultPath);
}
private static Plan getTestPlanPlan(int numSubTasks, String input, String output) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTwoTails.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTwoTails.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTwoTails.java
index f3c6cbb..cfe0510 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTwoTails.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationTerminationWithTwoTails.java
@@ -42,6 +42,10 @@ public class IterationTerminationWithTwoTails extends RecordAPITestBase {
protected String dataPath;
protected String resultPath;
+ public IterationTerminationWithTwoTails(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
dataPath = createTempFile("datapoints.txt", INPUT);
@@ -55,7 +59,7 @@ public class IterationTerminationWithTwoTails extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
- return getTestPlanPlan(4, dataPath, resultPath);
+ return getTestPlanPlan(DOP, dataPath, resultPath);
}
private static Plan getTestPlanPlan(int numSubTasks, String input, String output) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithAllReducerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithAllReducerITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithAllReducerITCase.java
index c7e28e2..7085776 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithAllReducerITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithAllReducerITCase.java
@@ -40,7 +40,9 @@ public class IterationWithAllReducerITCase extends RecordAPITestBase {
protected String dataPath;
protected String resultPath;
-
+ public IterationWithAllReducerITCase(){
+ setTaskManagerNumSlots(4);
+ }
@Override
protected void preSubmit() throws Exception {
@@ -55,7 +57,7 @@ public class IterationWithAllReducerITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
- Plan plan = getTestPlanPlan(4, dataPath, resultPath);
+ Plan plan = getTestPlanPlan(DOP, dataPath, resultPath);
return plan;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithChainingITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithChainingITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithChainingITCase.java
index 55c7a35..dba561d 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithChainingITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithChainingITCase.java
@@ -46,6 +46,7 @@ public class IterationWithChainingITCase extends RecordAPITestBase {
public IterationWithChainingITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -69,7 +70,7 @@ public class IterationWithChainingITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config1 = new Configuration();
- config1.setInteger("ChainedMapperITCase#NoSubtasks", 4);
+ config1.setInteger("ChainedMapperITCase#NoSubtasks", DOP);
return toParameterList(config1);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithUnionITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithUnionITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithUnionITCase.java
index 1272f6e..884dd64 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithUnionITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterationWithUnionITCase.java
@@ -46,6 +46,7 @@ public class IterationWithUnionITCase extends RecordAPITestBase {
public IterationWithUnionITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -67,7 +68,7 @@ public class IterationWithUnionITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config1 = new Configuration();
- config1.setInteger("IterationWithUnionITCase#NumSubtasks", 4);
+ config1.setInteger("IterationWithUnionITCase#NumSubtasks", DOP);
return toParameterList(config1);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterativeKMeansITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterativeKMeansITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterativeKMeansITCase.java
index c8da45f..199f108 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterativeKMeansITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/IterativeKMeansITCase.java
@@ -27,6 +27,10 @@ public class IterativeKMeansITCase extends RecordAPITestBase {
protected String dataPath;
protected String clusterPath;
protected String resultPath;
+
+ public IterativeKMeansITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
protected void preSubmit() throws Exception {
@@ -38,7 +42,7 @@ public class IterativeKMeansITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
KMeansBroadcast kmi = new KMeansBroadcast();
- return kmi.getPlan("4", dataPath, clusterPath, resultPath, "20");
+ return kmi.getPlan(String.valueOf(DOP), dataPath, clusterPath, resultPath, "20");
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/KMeansITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/KMeansITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/KMeansITCase.java
index 7085c3c..d67da35 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/KMeansITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/KMeansITCase.java
@@ -27,7 +27,11 @@ public class KMeansITCase extends RecordAPITestBase {
protected String dataPath;
protected String clusterPath;
protected String resultPath;
-
+
+ public KMeansITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
dataPath = createTempFile("datapoints.txt", KMeansData.DATAPOINTS);
@@ -38,7 +42,7 @@ public class KMeansITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
KMeansBroadcast kmi = new KMeansBroadcast();
- return kmi.getPlan("4", dataPath, clusterPath, resultPath, "20");
+ return kmi.getPlan(String.valueOf(DOP), dataPath, clusterPath, resultPath, "20");
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/LineRankITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/LineRankITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/LineRankITCase.java
index f38fb7e..7c68605 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/LineRankITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/LineRankITCase.java
@@ -54,6 +54,7 @@ public class LineRankITCase extends RecordAPITestBase {
public LineRankITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -68,7 +69,7 @@ public class LineRankITCase extends RecordAPITestBase {
LineRank lr = new LineRank();
Plan plan = lr.getScalaPlan(
- config.getInteger("NumSubtasks", 1),
+ config.getInteger("NumSubtasks", 1),
sourcesPath,
targetsPath,
9,
@@ -79,7 +80,7 @@ public class LineRankITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config1 = new Configuration();
- config1.setInteger("NumSubtasks", 4);
+ config1.setInteger("NumSubtasks", DOP);
config1.setInteger("NumIterations", 5);
return toParameterList(config1);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/PageRankITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/PageRankITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/PageRankITCase.java
index 8b12b53..58d8170 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/PageRankITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/PageRankITCase.java
@@ -38,6 +38,7 @@ public class PageRankITCase extends RecordAPITestBase {
public PageRankITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -64,7 +65,7 @@ public class PageRankITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config1 = new Configuration();
- config1.setInteger("NumSubtasks", 4);
+ config1.setInteger("NumSubtasks", DOP);
config1.setString("NumIterations", "5");
return toParameterList(config1);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/ConnectedComponentsNepheleITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/ConnectedComponentsNepheleITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/ConnectedComponentsNepheleITCase.java
index 7eff1aa..b2b3df2 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/ConnectedComponentsNepheleITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/ConnectedComponentsNepheleITCase.java
@@ -82,6 +82,10 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
private static final long MEM_PER_CONSUMER = 3;
+ private static final int DOP = 4;
+
+ private static final double MEM_FRAC_PER_CONSUMER = (double)MEM_PER_CONSUMER/TASK_MANAGER_MEMORY_SIZE*DOP;
+
protected String verticesPath;
protected String edgesPath;
@@ -90,6 +94,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
public ConnectedComponentsNepheleITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Parameters
@@ -118,20 +123,19 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
@Override
protected JobGraph getJobGraph() throws Exception {
- int dop = 4;
int maxIterations = 100;
int type = config.getInteger("testcase", 0);
switch (type) {
case 1:
- return createJobGraphUnifiedTails(verticesPath, edgesPath, resultPath, dop, maxIterations);
+ return createJobGraphUnifiedTails(verticesPath, edgesPath, resultPath, DOP, maxIterations);
case 2:
- return createJobGraphSeparateTails(verticesPath, edgesPath, resultPath, dop, maxIterations);
+ return createJobGraphSeparateTails(verticesPath, edgesPath, resultPath, DOP, maxIterations);
case 3:
- return createJobGraphIntermediateWorksetUpdateAndSolutionSetTail(verticesPath, edgesPath, resultPath, dop,
+ return createJobGraphIntermediateWorksetUpdateAndSolutionSetTail(verticesPath, edgesPath, resultPath, DOP,
maxIterations);
case 4:
- return createJobGraphSolutionSetUpdateAndWorksetTail(verticesPath, edgesPath, resultPath, dop,
+ return createJobGraphSolutionSetUpdateAndWorksetTail(verticesPath, edgesPath, resultPath, DOP,
maxIterations);
default:
throw new RuntimeException("Broken test configuration");
@@ -167,7 +171,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
@SuppressWarnings("unchecked")
CsvInputFormat verticesInFormat = new CsvInputFormat(' ', LongValue.class);
JobInputVertex verticesInput = JobGraphUtils.createInput(verticesInFormat, verticesPath, "VerticesInput",
- jobGraph, numSubTasks, numSubTasks);
+ jobGraph, numSubTasks);
TaskConfig verticesInputConfig = new TaskConfig(verticesInput.getConfiguration());
{
verticesInputConfig.addOutputShipStrategy(ShipStrategyType.FORWARD);
@@ -199,7 +203,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
@SuppressWarnings("unchecked")
CsvInputFormat edgesInFormat = new CsvInputFormat(' ', LongValue.class, LongValue.class);
JobInputVertex edgesInput = JobGraphUtils.createInput(edgesInFormat, edgesPath, "EdgesInput", jobGraph,
- numSubTasks, numSubTasks);
+ numSubTasks);
TaskConfig edgesInputConfig = new TaskConfig(edgesInput.getConfiguration());
{
edgesInputConfig.setOutputSerializer(serializer);
@@ -216,7 +220,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
TypePairComparatorFactory<?, ?> pairComparator) {
JobTaskVertex head = JobGraphUtils.createTask(IterationHeadPactTask.class, "Join With Edges (Iteration Head)",
- jobGraph, numSubTasks, numSubTasks);
+ jobGraph, numSubTasks);
TaskConfig headConfig = new TaskConfig(head.getConfiguration());
{
headConfig.setIterationId(ITERATION_ID);
@@ -234,7 +238,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
headConfig.setInputComparator(comparator, 1);
headConfig.setInputLocalStrategy(1, LocalStrategy.NONE);
headConfig.setInputCached(1, true);
- headConfig.setInputMaterializationMemory(1, MEM_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeInputMaterializationMemory(1, MEM_FRAC_PER_CONSUMER);
// initial solution set input
headConfig.addInputToGroup(2);
@@ -248,8 +252,8 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// back channel / iterations
headConfig.setIsWorksetIteration();
- headConfig.setBackChannelMemory(MEM_PER_CONSUMER * JobGraphUtils.MEGABYTE);
- headConfig.setSolutionSetMemory(MEM_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeBackChannelMemory(MEM_FRAC_PER_CONSUMER);
+ headConfig.setRelativeSolutionSetMemory(MEM_FRAC_PER_CONSUMER );
// output into iteration
headConfig.setOutputSerializer(serializer);
@@ -273,7 +277,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
headConfig.setDriverComparator(comparator, 0);
headConfig.setDriverComparator(comparator, 1);
headConfig.setDriverPairComparator(pairComparator);
- headConfig.setMemoryDriver(MEM_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeMemoryDriver(MEM_FRAC_PER_CONSUMER);
headConfig.addIterationAggregator(
WorksetEmptyConvergenceCriterion.AGGREGATOR_NAME, new LongSumAggregator());
@@ -288,7 +292,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// --------------- the intermediate (reduce to min id) ---------------
JobTaskVertex intermediate = JobGraphUtils.createTask(IterationIntermediatePactTask.class,
- "Find Min Component-ID", jobGraph, numSubTasks, numSubTasks);
+ "Find Min Component-ID", jobGraph, numSubTasks);
TaskConfig intermediateConfig = new TaskConfig(intermediate.getConfiguration());
{
intermediateConfig.setIterationId(ITERATION_ID);
@@ -297,7 +301,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
intermediateConfig.setInputSerializer(serializer, 0);
intermediateConfig.setInputComparator(comparator, 0);
intermediateConfig.setInputLocalStrategy(0, LocalStrategy.SORT);
- intermediateConfig.setMemoryInput(0, MEM_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ intermediateConfig.setRelativeMemoryInput(0, MEM_FRAC_PER_CONSUMER);
intermediateConfig.setFilehandlesInput(0, 64);
intermediateConfig.setSpillingThresholdInput(0, 0.85f);
@@ -316,7 +320,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
private static JobOutputVertex createOutput(JobGraph jobGraph, String resultPath, int numSubTasks,
TypeSerializerFactory<?> serializer) {
- JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Final Output", numSubTasks, numSubTasks);
+ JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Final Output", numSubTasks);
TaskConfig outputConfig = new TaskConfig(output.getConfiguration());
{
@@ -341,7 +345,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
private static JobOutputVertex createFakeTail(JobGraph jobGraph, int numSubTasks) {
JobOutputVertex fakeTailOutput =
- JobGraphUtils.createFakeOutput(jobGraph, "FakeTailOutput", numSubTasks, numSubTasks);
+ JobGraphUtils.createFakeOutput(jobGraph, "FakeTailOutput", numSubTasks);
return fakeTailOutput;
}
@@ -389,7 +393,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// --------------- the tail (solution set join) ---------------
JobTaskVertex tail = JobGraphUtils.createTask(IterationTailPactTask.class, "IterationTail", jobGraph,
- numSubTasks, numSubTasks);
+ numSubTasks);
TaskConfig tailConfig = new TaskConfig(tail.getConfiguration());
{
tailConfig.setIterationId(ITERATION_ID);
@@ -480,7 +484,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// ------------------ the intermediate (ss join) ----------------------
JobTaskVertex ssJoinIntermediate = JobGraphUtils.createTask(IterationIntermediatePactTask.class,
- "Solution Set Join", jobGraph, numSubTasks, numSubTasks);
+ "Solution Set Join", jobGraph, numSubTasks);
TaskConfig ssJoinIntermediateConfig = new TaskConfig(ssJoinIntermediate.getConfiguration());
{
ssJoinIntermediateConfig.setIterationId(ITERATION_ID);
@@ -509,7 +513,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// -------------------------- ss tail --------------------------------
JobTaskVertex ssTail = JobGraphUtils.createTask(IterationTailPactTask.class, "IterationSolutionSetTail",
- jobGraph, numSubTasks, numSubTasks);
+ jobGraph, numSubTasks);
TaskConfig ssTailConfig = new TaskConfig(ssTail.getConfiguration());
{
ssTailConfig.setIterationId(ITERATION_ID);
@@ -520,7 +524,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
ssTailConfig.addInputToGroup(0);
ssTailConfig.setInputSerializer(serializer, 0);
ssTailConfig.setInputAsynchronouslyMaterialized(0, true);
- ssTailConfig.setInputMaterializationMemory(0, MEM_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ ssTailConfig.setRelativeInputMaterializationMemory(0, MEM_FRAC_PER_CONSUMER);
// output
ssTailConfig.addOutputShipStrategy(ShipStrategyType.FORWARD);
@@ -534,7 +538,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// -------------------------- ws tail --------------------------------
JobTaskVertex wsTail = JobGraphUtils.createTask(IterationTailPactTask.class, "IterationWorksetTail",
- jobGraph, numSubTasks, numSubTasks);
+ jobGraph, numSubTasks);
TaskConfig wsTailConfig = new TaskConfig(wsTail.getConfiguration());
{
wsTailConfig.setIterationId(ITERATION_ID);
@@ -631,7 +635,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// ------------------ the intermediate (ws update) ----------------------
JobTaskVertex wsUpdateIntermediate =
JobGraphUtils.createTask(IterationIntermediatePactTask.class, "WorksetUpdate", jobGraph,
- numSubTasks, numSubTasks);
+ numSubTasks);
TaskConfig wsUpdateConfig = new TaskConfig(wsUpdateIntermediate.getConfiguration());
{
wsUpdateConfig.setIterationId(ITERATION_ID);
@@ -661,7 +665,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// -------------------------- ss tail --------------------------------
JobTaskVertex ssTail =
JobGraphUtils.createTask(IterationTailPactTask.class, "IterationSolutionSetTail", jobGraph,
- numSubTasks, numSubTasks);
+ numSubTasks);
TaskConfig ssTailConfig = new TaskConfig(ssTail.getConfiguration());
{
ssTailConfig.setIterationId(ITERATION_ID);
@@ -754,7 +758,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// ------------------ the intermediate (ss update) ----------------------
JobTaskVertex ssJoinIntermediate = JobGraphUtils.createTask(IterationIntermediatePactTask.class,
- "Solution Set Update", jobGraph, numSubTasks, numSubTasks);
+ "Solution Set Update", jobGraph, numSubTasks);
TaskConfig ssJoinIntermediateConfig = new TaskConfig(ssJoinIntermediate.getConfiguration());
{
ssJoinIntermediateConfig.setIterationId(ITERATION_ID);
@@ -782,7 +786,7 @@ public class ConnectedComponentsNepheleITCase extends RecordAPITestBase {
// -------------------------- ws tail --------------------------------
JobTaskVertex wsTail = JobGraphUtils.createTask(IterationTailPactTask.class, "IterationWorksetTail",
- jobGraph, numSubTasks, numSubTasks);
+ jobGraph, numSubTasks);
TaskConfig wsTailConfig = new TaskConfig(wsTail.getConfiguration());
{
wsTailConfig.setIterationId(ITERATION_ID);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankNepheleITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankNepheleITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankNepheleITCase.java
index 872cabc..002533a 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankNepheleITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankNepheleITCase.java
@@ -34,6 +34,10 @@ public class DanglingPageRankNepheleITCase extends RecordAPITestBase {
protected String edgesPath;
protected String resultPath;
+ public DanglingPageRankNepheleITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
+
@Override
protected void preSubmit() throws Exception {
@@ -45,8 +49,7 @@ public class DanglingPageRankNepheleITCase extends RecordAPITestBase {
@Override
protected JobGraph getJobGraph() throws Exception {
String[] parameters = new String[] {
- "4",
- "4",
+ new Integer(DOP).toString(),
pagesWithRankPath,
edgesPath,
resultPath,
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankWithCombinerNepheleITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankWithCombinerNepheleITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankWithCombinerNepheleITCase.java
index 59d5206..e42620e 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankWithCombinerNepheleITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/DanglingPageRankWithCombinerNepheleITCase.java
@@ -22,6 +22,10 @@ public class DanglingPageRankWithCombinerNepheleITCase extends RecordAPITestBase
protected String pagesWithRankPath;
protected String edgesPath;
protected String resultPath;
+
+ public DanglingPageRankWithCombinerNepheleITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
protected void preSubmit() throws Exception {
@@ -33,8 +37,7 @@ public class DanglingPageRankWithCombinerNepheleITCase extends RecordAPITestBase
@Override
protected JobGraph getJobGraph() throws Exception {
String[] parameters = new String[] {
- "4",
- "4",
+ new Integer(DOP).toString(),
pagesWithRankPath,
edgesPath,
resultPath,
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/IterationWithChainingNepheleITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/IterationWithChainingNepheleITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/IterationWithChainingNepheleITCase.java
index ef7c9d2..8a68402 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/IterationWithChainingNepheleITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/IterationWithChainingNepheleITCase.java
@@ -75,6 +75,7 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
public IterationWithChainingNepheleITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -94,7 +95,7 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
@Parameterized.Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("ChainedMapperNepheleITCase#NoSubtasks", 2);
+ config.setInteger("ChainedMapperNepheleITCase#NoSubtasks", DOP);
config.setInteger("ChainedMapperNepheleITCase#MaxIterations", 2);
return toParameterList(config);
}
@@ -118,8 +119,6 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
final TypeComparatorFactory<Record> comparator =
new RecordComparatorFactory(new int[] { 0 }, new Class[] { IntValue.class });
- final long MEM_PER_CONSUMER = 2;
-
final int ITERATION_ID = 1;
// --------------------------------------------------------------------------------------------------------------
@@ -128,7 +127,7 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
// - input -----------------------------------------------------------------------------------------------------
JobInputVertex input = JobGraphUtils.createInput(
- new PointInFormat(), inputPath, "Input", jobGraph, numSubTasks, numSubTasks);
+ new PointInFormat(), inputPath, "Input", jobGraph, numSubTasks);
TaskConfig inputConfig = new TaskConfig(input.getConfiguration());
{
inputConfig.setOutputSerializer(serializer);
@@ -137,7 +136,7 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
// - head ------------------------------------------------------------------------------------------------------
JobTaskVertex head = JobGraphUtils.createTask(
- IterationHeadPactTask.class, "Iteration Head", jobGraph, numSubTasks, numSubTasks);
+ IterationHeadPactTask.class, "Iteration Head", jobGraph, numSubTasks);
TaskConfig headConfig = new TaskConfig(head.getConfiguration());
{
headConfig.setIterationId(ITERATION_ID);
@@ -168,12 +167,12 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
headConfig.setStubWrapper(new UserCodeClassWrapper<DummyMapper>(DummyMapper.class));
// back channel
- headConfig.setBackChannelMemory(MEM_PER_CONSUMER * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeBackChannelMemory(1.0);
}
// - tail ------------------------------------------------------------------------------------------------------
JobTaskVertex tail = JobGraphUtils.createTask(
- IterationTailPactTask.class, "Chained Iteration Tail", jobGraph, numSubTasks, numSubTasks);
+ IterationTailPactTask.class, "Chained Iteration Tail", jobGraph, numSubTasks);
TaskConfig tailConfig = new TaskConfig(tail.getConfiguration());
{
tailConfig.setIterationId(ITERATION_ID);
@@ -210,7 +209,7 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
}
// - output ----------------------------------------------------------------------------------------------------
- JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Output", numSubTasks, numSubTasks);
+ JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "Output", numSubTasks);
TaskConfig outputConfig = new TaskConfig(output.getConfiguration());
{
outputConfig.addInputToGroup(0);
@@ -221,7 +220,7 @@ public class IterationWithChainingNepheleITCase extends RecordAPITestBase {
}
// - fake tail -------------------------------------------------------------------------------------------------
- JobOutputVertex fakeTail = JobGraphUtils.createFakeOutput(jobGraph, "Fake Tail", numSubTasks, numSubTasks);
+ JobOutputVertex fakeTail = JobGraphUtils.createFakeOutput(jobGraph, "Fake Tail", numSubTasks);
// - sync ------------------------------------------------------------------------------------------------------
JobOutputVertex sync = JobGraphUtils.createSync(jobGraph, numSubTasks);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
index a229086..109c91a 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
@@ -51,14 +51,14 @@ public class JobGraphUtils {
}
public static <T extends FileInputFormat<?>> JobInputVertex createInput(T stub, String path, String name, JobGraph graph,
- int degreeOfParallelism, int numSubTasksPerInstance)
+ int degreeOfParallelism)
{
stub.setFilePath(path);
- return createInput(new UserCodeObjectWrapper<T>(stub), name, graph, degreeOfParallelism, numSubTasksPerInstance);
+ return createInput(new UserCodeObjectWrapper<T>(stub), name, graph, degreeOfParallelism);
}
private static <T extends InputFormat<?,?>> JobInputVertex createInput(UserCodeWrapper<T> stub, String name, JobGraph graph,
- int degreeOfParallelism, int numSubTasksPerInstance)
+ int degreeOfParallelism)
{
JobInputVertex inputVertex = new JobInputVertex(name, graph);
@@ -67,8 +67,7 @@ public class JobGraphUtils {
inputVertex.setInputClass(clazz);
inputVertex.setNumberOfSubtasks(degreeOfParallelism);
- inputVertex.setNumberOfSubtasksPerInstance(numSubTasksPerInstance);
-
+
TaskConfig inputConfig = new TaskConfig(inputVertex.getConfiguration());
inputConfig.setStubWrapper(stub);
@@ -89,12 +88,11 @@ public class JobGraphUtils {
}
public static JobTaskVertex createTask(@SuppressWarnings("rawtypes") Class<? extends RegularPactTask> task, String name, JobGraph graph,
- int degreeOfParallelism, int numSubtasksPerInstance)
+ int degreeOfParallelism)
{
JobTaskVertex taskVertex = new JobTaskVertex(name, graph);
taskVertex.setTaskClass(task);
taskVertex.setNumberOfSubtasks(degreeOfParallelism);
- taskVertex.setNumberOfSubtasksPerInstance(numSubtasksPerInstance);
return taskVertex;
}
@@ -107,23 +105,19 @@ public class JobGraphUtils {
return sync;
}
- public static JobOutputVertex createFakeOutput(JobGraph jobGraph, String name, int degreeOfParallelism,
- int numSubTasksPerInstance)
+ public static JobOutputVertex createFakeOutput(JobGraph jobGraph, String name, int degreeOfParallelism)
{
JobOutputVertex outputVertex = new JobOutputVertex(name, jobGraph);
outputVertex.setOutputClass(FakeOutputTask.class);
outputVertex.setNumberOfSubtasks(degreeOfParallelism);
- outputVertex.setNumberOfSubtasksPerInstance(numSubTasksPerInstance);
return outputVertex;
}
- public static JobOutputVertex createFileOutput(JobGraph jobGraph, String name, int degreeOfParallelism,
- int numSubTasksPerInstance)
+ public static JobOutputVertex createFileOutput(JobGraph jobGraph, String name, int degreeOfParallelism)
{
JobOutputVertex sinkVertex = new JobOutputVertex(name, jobGraph);
sinkVertex.setOutputClass(DataSinkTask.class);
sinkVertex.setNumberOfSubtasks(degreeOfParallelism);
- sinkVertex.setNumberOfSubtasksPerInstance(numSubTasksPerInstance);
return sinkVertex;
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRank.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRank.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRank.java
index b112741..5b2433e 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRank.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRank.java
@@ -92,7 +92,6 @@ public class CustomCompensatableDanglingPageRank {
public static JobGraph getJobGraph(String[] args) throws Exception {
int degreeOfParallelism = 2;
- int numSubTasksPerInstance = degreeOfParallelism;
String pageWithRankInputPath = ""; //"file://" + PlayConstants.PLAY_DIR + "test-inputs/danglingpagerank/pageWithRank";
String adjacencyListInputPath = ""; //"file://" + PlayConstants.PLAY_DIR +
// "test-inputs/danglingpagerank/adjacencylists";
@@ -109,31 +108,32 @@ public class CustomCompensatableDanglingPageRank {
int failingIteration = 2;
double messageLoss = 0.75;
- if (args.length >= 15) {
+ if (args.length >= 14) {
degreeOfParallelism = Integer.parseInt(args[0]);
- numSubTasksPerInstance = Integer.parseInt(args[1]);
- pageWithRankInputPath = args[2];
- adjacencyListInputPath = args[3];
- outputPath = args[4];
-// confPath = args[5];
- minorConsumer = Integer.parseInt(args[6]);
- matchMemory = Integer.parseInt(args[7]);
- coGroupSortMemory = Integer.parseInt(args[8]);
- numIterations = Integer.parseInt(args[9]);
- numVertices = Long.parseLong(args[10]);
- numDanglingVertices = Long.parseLong(args[11]);
- failingWorkers = args[12];
- failingIteration = Integer.parseInt(args[13]);
- messageLoss = Double.parseDouble(args[14]);
+ pageWithRankInputPath = args[1];
+ adjacencyListInputPath = args[2];
+ outputPath = args[3];
+// confPath = args[4];
+ minorConsumer = Integer.parseInt(args[5]);
+ matchMemory = Integer.parseInt(args[6]);
+ coGroupSortMemory = Integer.parseInt(args[7]);
+ numIterations = Integer.parseInt(args[8]);
+ numVertices = Long.parseLong(args[9]);
+ numDanglingVertices = Long.parseLong(args[10]);
+ failingWorkers = args[11];
+ failingIteration = Integer.parseInt(args[12]);
+ messageLoss = Double.parseDouble(args[13]);
}
+ int totalMemoryConsumption = 3*minorConsumer + matchMemory + coGroupSortMemory;
+
JobGraph jobGraph = new JobGraph("CompensatableDanglingPageRank");
// --------------- the inputs ---------------------
// page rank input
JobInputVertex pageWithRankInput = JobGraphUtils.createInput(new CustomImprovedDanglingPageRankInputFormat(),
- pageWithRankInputPath, "DanglingPageWithRankInput", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ pageWithRankInputPath, "DanglingPageWithRankInput", jobGraph, degreeOfParallelism);
TaskConfig pageWithRankInputConfig = new TaskConfig(pageWithRankInput.getConfiguration());
pageWithRankInputConfig.addOutputShipStrategy(ShipStrategyType.PARTITION_HASH);
pageWithRankInputConfig.setOutputComparator(vertexWithRankAndDanglingComparator, 0);
@@ -142,7 +142,7 @@ public class CustomCompensatableDanglingPageRank {
// edges as adjacency list
JobInputVertex adjacencyListInput = JobGraphUtils.createInput(new CustomImprovedAdjacencyListInputFormat(),
- adjacencyListInputPath, "AdjancencyListInput", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ adjacencyListInputPath, "AdjancencyListInput", jobGraph, degreeOfParallelism);
TaskConfig adjacencyListInputConfig = new TaskConfig(adjacencyListInput.getConfiguration());
adjacencyListInputConfig.addOutputShipStrategy(ShipStrategyType.PARTITION_HASH);
adjacencyListInputConfig.setOutputSerializer(vertexWithAdjacencyListSerializer);
@@ -150,7 +150,7 @@ public class CustomCompensatableDanglingPageRank {
// --------------- the head ---------------------
JobTaskVertex head = JobGraphUtils.createTask(IterationHeadPactTask.class, "IterationHead", jobGraph,
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
TaskConfig headConfig = new TaskConfig(head.getConfiguration());
headConfig.setIterationId(ITERATION_ID);
@@ -160,12 +160,12 @@ public class CustomCompensatableDanglingPageRank {
headConfig.setInputSerializer(vertexWithRankAndDanglingSerializer, 0);
headConfig.setInputComparator(vertexWithRankAndDanglingComparator, 0);
headConfig.setInputLocalStrategy(0, LocalStrategy.SORT);
- headConfig.setMemoryInput(0, minorConsumer * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeMemoryInput(0, (double) minorConsumer / totalMemoryConsumption);
headConfig.setFilehandlesInput(0, NUM_FILE_HANDLES_PER_SORT);
headConfig.setSpillingThresholdInput(0, SORT_SPILL_THRESHOLD);
// back channel / iterations
- headConfig.setBackChannelMemory(minorConsumer * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeBackChannelMemory((double) minorConsumer / totalMemoryConsumption);
// output into iteration
headConfig.setOutputSerializer(vertexWithRankAndDanglingSerializer);
@@ -195,13 +195,13 @@ public class CustomCompensatableDanglingPageRank {
// --------------- the join ---------------------
JobTaskVertex intermediate = JobGraphUtils.createTask(IterationIntermediatePactTask.class,
- "IterationIntermediate", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ "IterationIntermediate", jobGraph, degreeOfParallelism);
TaskConfig intermediateConfig = new TaskConfig(intermediate.getConfiguration());
intermediateConfig.setIterationId(ITERATION_ID);
// intermediateConfig.setDriver(RepeatableHashjoinMatchDriverWithCachedBuildside.class);
intermediateConfig.setDriver(BuildSecondCachedMatchDriver.class);
intermediateConfig.setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- intermediateConfig.setMemoryDriver(matchMemory * JobGraphUtils.MEGABYTE);
+ intermediateConfig.setRelativeMemoryDriver((double) matchMemory / totalMemoryConsumption);
intermediateConfig.addInputToGroup(0);
intermediateConfig.addInputToGroup(1);
intermediateConfig.setInputSerializer(vertexWithRankAndDanglingSerializer, 0);
@@ -223,7 +223,7 @@ public class CustomCompensatableDanglingPageRank {
// ---------------- the tail (co group) --------------------
JobTaskVertex tail = JobGraphUtils.createTask(IterationTailPactTask.class, "IterationTail", jobGraph,
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
TaskConfig tailConfig = new TaskConfig(tail.getConfiguration());
tailConfig.setIterationId(ITERATION_ID);
tailConfig.setIsWorksetUpdate();
@@ -240,10 +240,10 @@ public class CustomCompensatableDanglingPageRank {
tailConfig.setDriverComparator(vertexWithRankComparator, 1);
tailConfig.setDriverPairComparator(coGroupComparator);
tailConfig.setInputAsynchronouslyMaterialized(0, true);
- tailConfig.setInputMaterializationMemory(0, minorConsumer * JobGraphUtils.MEGABYTE);
+ tailConfig.setRelativeInputMaterializationMemory(0, (double)minorConsumer/totalMemoryConsumption);
tailConfig.setInputLocalStrategy(1, LocalStrategy.SORT);
tailConfig.setInputComparator(vertexWithRankComparator, 1);
- tailConfig.setMemoryInput(1, coGroupSortMemory * JobGraphUtils.MEGABYTE);
+ tailConfig.setRelativeMemoryInput(1, (double) coGroupSortMemory / totalMemoryConsumption);
tailConfig.setFilehandlesInput(1, NUM_FILE_HANDLES_PER_SORT);
tailConfig.setSpillingThresholdInput(1, SORT_SPILL_THRESHOLD);
@@ -261,8 +261,7 @@ public class CustomCompensatableDanglingPageRank {
// --------------- the output ---------------------
- JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "FinalOutput", degreeOfParallelism,
- numSubTasksPerInstance);
+ JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "FinalOutput", degreeOfParallelism);
TaskConfig outputConfig = new TaskConfig(output.getConfiguration());
outputConfig.addInputToGroup(0);
outputConfig.setInputSerializer(vertexWithRankAndDanglingSerializer, 0);
@@ -272,7 +271,7 @@ public class CustomCompensatableDanglingPageRank {
// --------------- the auxiliaries ---------------------
JobOutputVertex fakeTailOutput = JobGraphUtils.createFakeOutput(jobGraph, "FakeTailOutput",
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
JobOutputVertex sync = JobGraphUtils.createSync(jobGraph, degreeOfParallelism);
TaskConfig syncConfig = new TaskConfig(sync.getConfiguration());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRankWithCombiner.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRankWithCombiner.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRankWithCombiner.java
index 37fab39..698296a 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRankWithCombiner.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/customdanglingpagerank/CustomCompensatableDanglingPageRankWithCombiner.java
@@ -93,7 +93,6 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
public static JobGraph getJobGraph(String[] args) throws Exception {
int degreeOfParallelism = 2;
- int numSubTasksPerInstance = degreeOfParallelism;
String pageWithRankInputPath = ""; //"file://" + PlayConstants.PLAY_DIR + "test-inputs/danglingpagerank/pageWithRank";
String adjacencyListInputPath = ""; //"file://" + PlayConstants.PLAY_DIR +
// "test-inputs/danglingpagerank/adjacencylists";
@@ -109,31 +108,32 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
int failingIteration = 2;
double messageLoss = 0.75;
- if (args.length >= 15) {
+ if (args.length >= 14) {
degreeOfParallelism = Integer.parseInt(args[0]);
- numSubTasksPerInstance = Integer.parseInt(args[1]);
- pageWithRankInputPath = args[2];
- adjacencyListInputPath = args[3];
- outputPath = args[4];
- // [5] is config path
- minorConsumer = Integer.parseInt(args[6]);
- matchMemory = Integer.parseInt(args[7]);
- coGroupSortMemory = Integer.parseInt(args[8]);
- numIterations = Integer.parseInt(args[9]);
- numVertices = Long.parseLong(args[10]);
- numDanglingVertices = Long.parseLong(args[11]);
- failingWorkers = args[12];
- failingIteration = Integer.parseInt(args[13]);
- messageLoss = Double.parseDouble(args[14]);
+ pageWithRankInputPath = args[1];
+ adjacencyListInputPath = args[2];
+ outputPath = args[3];
+ // [4] is config path
+ minorConsumer = Integer.parseInt(args[5]);
+ matchMemory = Integer.parseInt(args[6]);
+ coGroupSortMemory = Integer.parseInt(args[7]);
+ numIterations = Integer.parseInt(args[8]);
+ numVertices = Long.parseLong(args[9]);
+ numDanglingVertices = Long.parseLong(args[10]);
+ failingWorkers = args[11];
+ failingIteration = Integer.parseInt(args[12]);
+ messageLoss = Double.parseDouble(args[13]);
}
+ int totalMemoryConsumption = 3*minorConsumer + 2*coGroupSortMemory + matchMemory;
+
JobGraph jobGraph = new JobGraph("CompensatableDanglingPageRank");
// --------------- the inputs ---------------------
// page rank input
JobInputVertex pageWithRankInput = JobGraphUtils.createInput(new CustomImprovedDanglingPageRankInputFormat(),
- pageWithRankInputPath, "DanglingPageWithRankInput", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ pageWithRankInputPath, "DanglingPageWithRankInput", jobGraph, degreeOfParallelism);
TaskConfig pageWithRankInputConfig = new TaskConfig(pageWithRankInput.getConfiguration());
pageWithRankInputConfig.addOutputShipStrategy(ShipStrategyType.PARTITION_HASH);
pageWithRankInputConfig.setOutputComparator(vertexWithRankAndDanglingComparator, 0);
@@ -142,7 +142,7 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
// edges as adjacency list
JobInputVertex adjacencyListInput = JobGraphUtils.createInput(new CustomImprovedAdjacencyListInputFormat(),
- adjacencyListInputPath, "AdjancencyListInput", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ adjacencyListInputPath, "AdjancencyListInput", jobGraph, degreeOfParallelism);
TaskConfig adjacencyListInputConfig = new TaskConfig(adjacencyListInput.getConfiguration());
adjacencyListInputConfig.addOutputShipStrategy(ShipStrategyType.PARTITION_HASH);
adjacencyListInputConfig.setOutputSerializer(vertexWithAdjacencyListSerializer);
@@ -150,7 +150,7 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
// --------------- the head ---------------------
JobTaskVertex head = JobGraphUtils.createTask(IterationHeadPactTask.class, "IterationHead", jobGraph,
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
TaskConfig headConfig = new TaskConfig(head.getConfiguration());
headConfig.setIterationId(ITERATION_ID);
@@ -160,12 +160,12 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
headConfig.setInputSerializer(vertexWithRankAndDanglingSerializer, 0);
headConfig.setInputComparator(vertexWithRankAndDanglingComparator, 0);
headConfig.setInputLocalStrategy(0, LocalStrategy.SORT);
- headConfig.setMemoryInput(0, minorConsumer * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeMemoryInput(0, (double)minorConsumer/totalMemoryConsumption);
headConfig.setFilehandlesInput(0, NUM_FILE_HANDLES_PER_SORT);
headConfig.setSpillingThresholdInput(0, SORT_SPILL_THRESHOLD);
// back channel / iterations
- headConfig.setBackChannelMemory(minorConsumer * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeBackChannelMemory((double)minorConsumer/totalMemoryConsumption);
// output into iteration
headConfig.setOutputSerializer(vertexWithRankAndDanglingSerializer);
@@ -195,13 +195,13 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
// --------------- the join ---------------------
JobTaskVertex intermediate = JobGraphUtils.createTask(IterationIntermediatePactTask.class,
- "IterationIntermediate", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ "IterationIntermediate", jobGraph, degreeOfParallelism);
TaskConfig intermediateConfig = new TaskConfig(intermediate.getConfiguration());
intermediateConfig.setIterationId(ITERATION_ID);
// intermediateConfig.setDriver(RepeatableHashjoinMatchDriverWithCachedBuildside.class);
intermediateConfig.setDriver(BuildSecondCachedMatchDriver.class);
intermediateConfig.setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- intermediateConfig.setMemoryDriver(matchMemory * JobGraphUtils.MEGABYTE);
+ intermediateConfig.setRelativeMemoryDriver((double)matchMemory/totalMemoryConsumption);
intermediateConfig.addInputToGroup(0);
intermediateConfig.addInputToGroup(1);
intermediateConfig.setInputSerializer(vertexWithRankAndDanglingSerializer, 0);
@@ -225,7 +225,7 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
combinerConfig.setInputSerializer(vertexWithRankSerializer, 0);
combinerConfig.setDriverStrategy(DriverStrategy.SORTED_GROUP_COMBINE);
combinerConfig.setDriverComparator(vertexWithRankComparator, 0);
- combinerConfig.setMemoryDriver(coGroupSortMemory * JobGraphUtils.MEGABYTE);
+ combinerConfig.setRelativeMemoryDriver((double)coGroupSortMemory/totalMemoryConsumption);
combinerConfig.setOutputSerializer(vertexWithRankSerializer);
combinerConfig.addOutputShipStrategy(ShipStrategyType.PARTITION_HASH);
combinerConfig.setOutputComparator(vertexWithRankComparator, 0);
@@ -235,7 +235,7 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
// ---------------- the tail (co group) --------------------
JobTaskVertex tail = JobGraphUtils.createTask(IterationTailPactTask.class, "IterationTail", jobGraph,
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
TaskConfig tailConfig = new TaskConfig(tail.getConfiguration());
tailConfig.setIterationId(ITERATION_ID);
tailConfig.setIsWorksetUpdate();
@@ -251,10 +251,10 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
tailConfig.setDriverComparator(vertexWithRankComparator, 1);
tailConfig.setDriverPairComparator(coGroupComparator);
tailConfig.setInputAsynchronouslyMaterialized(0, true);
- tailConfig.setInputMaterializationMemory(0, minorConsumer * JobGraphUtils.MEGABYTE);
+ tailConfig.setRelativeInputMaterializationMemory(0, (double)minorConsumer/totalMemoryConsumption);
tailConfig.setInputLocalStrategy(1, LocalStrategy.SORT);
tailConfig.setInputComparator(vertexWithRankComparator, 1);
- tailConfig.setMemoryInput(1, coGroupSortMemory * JobGraphUtils.MEGABYTE);
+ tailConfig.setRelativeMemoryInput(1, (double)coGroupSortMemory/totalMemoryConsumption);
tailConfig.setFilehandlesInput(1, NUM_FILE_HANDLES_PER_SORT);
tailConfig.setSpillingThresholdInput(1, SORT_SPILL_THRESHOLD);
tailConfig.addIterationAggregator(CustomCompensatableDotProductCoGroup.AGGREGATOR_NAME, new PageRankStatsAggregator());
@@ -273,8 +273,7 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
// --------------- the output ---------------------
- JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "FinalOutput", degreeOfParallelism,
- numSubTasksPerInstance);
+ JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "FinalOutput", degreeOfParallelism);
TaskConfig outputConfig = new TaskConfig(output.getConfiguration());
outputConfig.addInputToGroup(0);
outputConfig.setInputSerializer(vertexWithRankAndDanglingSerializer, 0);
@@ -284,7 +283,7 @@ public class CustomCompensatableDanglingPageRankWithCombiner {
// --------------- the auxiliaries ---------------------
JobOutputVertex fakeTailOutput = JobGraphUtils.createFakeOutput(jobGraph, "FakeTailOutput",
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
JobOutputVertex sync = JobGraphUtils.createSync(jobGraph, degreeOfParallelism);
TaskConfig syncConfig = new TaskConfig(sync.getConfiguration());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/danglingpagerank/CompensatableDanglingPageRank.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/danglingpagerank/CompensatableDanglingPageRank.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/danglingpagerank/CompensatableDanglingPageRank.java
index 944f13b..f870fe6 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/danglingpagerank/CompensatableDanglingPageRank.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/danglingpagerank/CompensatableDanglingPageRank.java
@@ -72,7 +72,6 @@ public class CompensatableDanglingPageRank {
public static JobGraph getJobGraph(String[] args) throws Exception {
int degreeOfParallelism = 2;
- int numSubTasksPerInstance = degreeOfParallelism;
String pageWithRankInputPath = ""; // "file://" + PlayConstants.PLAY_DIR + "test-inputs/danglingpagerank/pageWithRank";
String adjacencyListInputPath = ""; // "file://" + PlayConstants.PLAY_DIR +
// "test-inputs/danglingpagerank/adjacencylists";
@@ -91,29 +90,30 @@ public class CompensatableDanglingPageRank {
if (args.length >= 15) {
degreeOfParallelism = Integer.parseInt(args[0]);
- numSubTasksPerInstance = Integer.parseInt(args[1]);
- pageWithRankInputPath = args[2];
- adjacencyListInputPath = args[3];
- outputPath = args[4];
-// confPath = args[5];
- minorConsumer = Integer.parseInt(args[6]);
- matchMemory = Integer.parseInt(args[7]);
- coGroupSortMemory = Integer.parseInt(args[8]);
- numIterations = Integer.parseInt(args[9]);
- numVertices = Long.parseLong(args[10]);
- numDanglingVertices = Long.parseLong(args[11]);
- failingWorkers = args[12];
- failingIteration = Integer.parseInt(args[13]);
- messageLoss = Double.parseDouble(args[14]);
+ pageWithRankInputPath = args[1];
+ adjacencyListInputPath = args[2];
+ outputPath = args[3];
+// confPath = args[4];
+ minorConsumer = Integer.parseInt(args[5]);
+ matchMemory = Integer.parseInt(args[6]);
+ coGroupSortMemory = Integer.parseInt(args[7]);
+ numIterations = Integer.parseInt(args[8]);
+ numVertices = Long.parseLong(args[9]);
+ numDanglingVertices = Long.parseLong(args[10]);
+ failingWorkers = args[11];
+ failingIteration = Integer.parseInt(args[12]);
+ messageLoss = Double.parseDouble(args[13]);
}
+ int totalMemoryConsumption = 3*minorConsumer + matchMemory + coGroupSortMemory;
+
JobGraph jobGraph = new JobGraph("CompensatableDanglingPageRank");
// --------------- the inputs ---------------------
// page rank input
JobInputVertex pageWithRankInput = JobGraphUtils.createInput(new ImprovedDanglingPageRankInputFormat(),
- pageWithRankInputPath, "DanglingPageWithRankInput", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ pageWithRankInputPath, "DanglingPageWithRankInput", jobGraph, degreeOfParallelism);
TaskConfig pageWithRankInputConfig = new TaskConfig(pageWithRankInput.getConfiguration());
pageWithRankInputConfig.addOutputShipStrategy(ShipStrategyType.PARTITION_HASH);
pageWithRankInputConfig.setOutputComparator(fieldZeroComparator, 0);
@@ -122,7 +122,7 @@ public class CompensatableDanglingPageRank {
// edges as adjacency list
JobInputVertex adjacencyListInput = JobGraphUtils.createInput(new ImprovedAdjacencyListInputFormat(),
- adjacencyListInputPath, "AdjancencyListInput", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ adjacencyListInputPath, "AdjancencyListInput", jobGraph, degreeOfParallelism);
TaskConfig adjacencyListInputConfig = new TaskConfig(adjacencyListInput.getConfiguration());
adjacencyListInputConfig.addOutputShipStrategy(ShipStrategyType.PARTITION_HASH);
adjacencyListInputConfig.setOutputSerializer(recSerializer);
@@ -130,7 +130,7 @@ public class CompensatableDanglingPageRank {
// --------------- the head ---------------------
JobTaskVertex head = JobGraphUtils.createTask(IterationHeadPactTask.class, "IterationHead", jobGraph,
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
TaskConfig headConfig = new TaskConfig(head.getConfiguration());
headConfig.setIterationId(ITERATION_ID);
@@ -140,12 +140,12 @@ public class CompensatableDanglingPageRank {
headConfig.setInputSerializer(recSerializer, 0);
headConfig.setInputComparator(fieldZeroComparator, 0);
headConfig.setInputLocalStrategy(0, LocalStrategy.SORT);
- headConfig.setMemoryInput(0, minorConsumer * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeMemoryInput(0, (double)minorConsumer/totalMemoryConsumption);
headConfig.setFilehandlesInput(0, NUM_FILE_HANDLES_PER_SORT);
headConfig.setSpillingThresholdInput(0, SORT_SPILL_THRESHOLD);
// back channel / iterations
- headConfig.setBackChannelMemory(minorConsumer * JobGraphUtils.MEGABYTE);
+ headConfig.setRelativeBackChannelMemory((double)minorConsumer/totalMemoryConsumption);
// output into iteration
headConfig.setOutputSerializer(recSerializer);
@@ -175,13 +175,13 @@ public class CompensatableDanglingPageRank {
// --------------- the join ---------------------
JobTaskVertex intermediate = JobGraphUtils.createTask(IterationIntermediatePactTask.class,
- "IterationIntermediate", jobGraph, degreeOfParallelism, numSubTasksPerInstance);
+ "IterationIntermediate", jobGraph, degreeOfParallelism);
TaskConfig intermediateConfig = new TaskConfig(intermediate.getConfiguration());
intermediateConfig.setIterationId(ITERATION_ID);
// intermediateConfig.setDriver(RepeatableHashjoinMatchDriverWithCachedBuildside.class);
intermediateConfig.setDriver(BuildSecondCachedMatchDriver.class);
intermediateConfig.setDriverStrategy(DriverStrategy.HYBRIDHASH_BUILD_SECOND);
- intermediateConfig.setMemoryDriver(matchMemory * JobGraphUtils.MEGABYTE);
+ intermediateConfig.setRelativeMemoryDriver((double)matchMemory/totalMemoryConsumption);
intermediateConfig.addInputToGroup(0);
intermediateConfig.addInputToGroup(1);
intermediateConfig.setInputSerializer(recSerializer, 0);
@@ -203,7 +203,7 @@ public class CompensatableDanglingPageRank {
// ---------------- the tail (co group) --------------------
JobTaskVertex tail = JobGraphUtils.createTask(IterationTailPactTask.class, "IterationTail", jobGraph,
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
TaskConfig tailConfig = new TaskConfig(tail.getConfiguration());
tailConfig.setIterationId(ITERATION_ID);
tailConfig.setIsWorksetUpdate();
@@ -220,10 +220,10 @@ public class CompensatableDanglingPageRank {
tailConfig.setDriverComparator(fieldZeroComparator, 1);
tailConfig.setDriverPairComparator(pairComparatorFactory);
tailConfig.setInputAsynchronouslyMaterialized(0, true);
- tailConfig.setInputMaterializationMemory(0, minorConsumer * JobGraphUtils.MEGABYTE);
+ tailConfig.setRelativeInputMaterializationMemory(0, (double)minorConsumer/totalMemoryConsumption);
tailConfig.setInputLocalStrategy(1, LocalStrategy.SORT);
tailConfig.setInputComparator(fieldZeroComparator, 1);
- tailConfig.setMemoryInput(1, coGroupSortMemory * JobGraphUtils.MEGABYTE);
+ tailConfig.setRelativeMemoryInput(1, (double)coGroupSortMemory/totalMemoryConsumption);
tailConfig.setFilehandlesInput(1, NUM_FILE_HANDLES_PER_SORT);
tailConfig.setSpillingThresholdInput(1, SORT_SPILL_THRESHOLD);
@@ -241,8 +241,7 @@ public class CompensatableDanglingPageRank {
// --------------- the output ---------------------
- JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "FinalOutput", degreeOfParallelism,
- numSubTasksPerInstance);
+ JobOutputVertex output = JobGraphUtils.createFileOutput(jobGraph, "FinalOutput", degreeOfParallelism);
TaskConfig outputConfig = new TaskConfig(output.getConfiguration());
outputConfig.addInputToGroup(0);
outputConfig.setInputSerializer(recSerializer, 0);
@@ -252,7 +251,7 @@ public class CompensatableDanglingPageRank {
// --------------- the auxiliaries ---------------------
JobOutputVertex fakeTailOutput = JobGraphUtils.createFakeOutput(jobGraph, "FakeTailOutput",
- degreeOfParallelism, numSubTasksPerInstance);
+ degreeOfParallelism);
JobOutputVertex sync = JobGraphUtils.createSync(jobGraph, degreeOfParallelism);
TaskConfig syncConfig = new TaskConfig(sync.getConfiguration());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
index 3626ba7..0e297ec 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/localDistributed/PackagedProgramEndToEndITCase.java
@@ -14,6 +14,7 @@ package eu.stratosphere.test.localDistributed;
import java.io.File;
import java.io.FileWriter;
+import java.net.URL;
import eu.stratosphere.client.minicluster.NepheleMiniCluster;
import org.junit.Assert;
@@ -28,6 +29,8 @@ import eu.stratosphere.util.LogUtils;
public class PackagedProgramEndToEndITCase {
+ private static final int DOP = 4;
+
static {
LogUtils.initializeDefaultTestConsoleLogger();
}
@@ -53,16 +56,18 @@ public class PackagedProgramEndToEndITCase {
fwClusters.write(KMeansData.INITIAL_CENTERS);
fwClusters.close();
- String jarPath = "target/maven-test-jar.jar";
+ URL jarFileURL = getClass().getResource("/KMeansForTest.jar");
+ String jarPath = jarFileURL.getFile();
// run KMeans
- cluster.setNumTaskManager(2);
+ cluster.setNumTaskTracker(2);
+ cluster.setTaskManagerNumSlots(2);
cluster.start();
RemoteExecutor ex = new RemoteExecutor("localhost", 6498);
-
+
ex.executeJar(jarPath,
- "eu.stratosphere.test.util.testjar.KMeansForTest",
- new String[] {
+ "eu.stratosphere.examples.scala.testing.KMeansForTest",
+ new String[] {new Integer(DOP).toString(),
points.toURI().toString(),
clusters.toURI().toString(),
outFile.toURI().toString(),
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/operators/UnionSinkITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/operators/UnionSinkITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/operators/UnionSinkITCase.java
index b9f0f2c..2637acb 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/operators/UnionSinkITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/operators/UnionSinkITCase.java
@@ -44,6 +44,7 @@ public class UnionSinkITCase extends RecordAPITestBase {
public UnionSinkITCase(Configuration testConfig) {
super(testConfig);
+ setTaskManagerNumSlots(DOP);
}
private static final String MAP_IN = "1 1\n2 2\n2 8\n4 4\n4 4\n6 6\n7 7\n8 8\n" +
@@ -115,7 +116,7 @@ public class UnionSinkITCase extends RecordAPITestBase {
output.addInput(testMapper2);
Plan plan = new Plan(output);
- plan.setDefaultParallelism(4);
+ plan.setDefaultParallelism(DOP);
PactCompiler pc = new PactCompiler(new DataStatistics());
OptimizedPlan op = pc.compile(plan);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/CollectionSourceTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/CollectionSourceTest.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/CollectionSourceTest.java
index 48f703a..1f30075 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/CollectionSourceTest.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/CollectionSourceTest.java
@@ -36,8 +36,14 @@ import eu.stratosphere.util.Collector;
*/
public class CollectionSourceTest extends RecordAPITestBase {
+ private static final int DOP = 4;
+
protected String resultPath;
+ public CollectionSourceTest(){
+ setTaskManagerNumSlots(DOP);
+ }
+
public static class Join extends JoinFunction {
private static final long serialVersionUID = 1L;
@@ -110,7 +116,7 @@ public class CollectionSourceTest extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
- return getPlan(4, resultPath);
+ return getPlan(DOP, resultPath);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/ComputeEdgeDegreesITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/ComputeEdgeDegreesITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/ComputeEdgeDegreesITCase.java
index f52e108..7a86112 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/ComputeEdgeDegreesITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/ComputeEdgeDegreesITCase.java
@@ -35,6 +35,7 @@ public class ComputeEdgeDegreesITCase extends RecordAPITestBase {
public ComputeEdgeDegreesITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -58,7 +59,7 @@ public class ComputeEdgeDegreesITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("ComputeEdgeDegreesTest#NumSubtasks", 4);
+ config.setInteger("ComputeEdgeDegreesTest#NumSubtasks", DOP);
return toParameterList(config);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesOnEdgesWithDegreesITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesOnEdgesWithDegreesITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesOnEdgesWithDegreesITCase.java
index b4b0386..68d66a6 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesOnEdgesWithDegreesITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesOnEdgesWithDegreesITCase.java
@@ -35,6 +35,7 @@ public class EnumTrianglesOnEdgesWithDegreesITCase extends RecordAPITestBase {
public EnumTrianglesOnEdgesWithDegreesITCase(Configuration config) {
super(config);
+ setTaskManagerNumSlots(DOP);
}
@Override
@@ -60,7 +61,7 @@ public class EnumTrianglesOnEdgesWithDegreesITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("EnumTrianglesTest#NumSubtasks", 4);
+ config.setInteger("EnumTrianglesTest#NumSubtasks", DOP);
return toParameterList(config);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesRDFITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesRDFITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesRDFITCase.java
index 945cc67..93e9bfe 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesRDFITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/EnumTrianglesRDFITCase.java
@@ -54,7 +54,7 @@ public class EnumTrianglesRDFITCase extends RecordAPITestBase {
protected Plan getTestJob() {
EnumTrianglesRdfFoaf enumTriangles = new EnumTrianglesRdfFoaf();
return enumTriangles.getPlan(
- config.getString("EnumTrianglesTest#NoSubtasks", "4"), edgesPath, resultPath);
+ config.getString("EnumTrianglesTest#NoSubtasks", new Integer(DOP).toString()), edgesPath, resultPath);
}
@Override
@@ -65,7 +65,7 @@ public class EnumTrianglesRDFITCase extends RecordAPITestBase {
@Parameters
public static Collection<Object[]> getConfigurations() {
Configuration config = new Configuration();
- config.setInteger("EnumTrianglesTest#NoSubtasks", 4);
+ config.setInteger("EnumTrianglesTest#NoSubtasks", DOP);
return toParameterList(config);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingITCase.java
index 7aa9a78..f628ca5 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobTests/GlobalSortingITCase.java
@@ -38,6 +38,9 @@ public class GlobalSortingITCase extends RecordAPITestBase {
private String sortedRecords;
+ public GlobalSortingITCase(){
+ setTaskManagerNumSlots(DOP);
+ }
@Override
protected void preSubmit() throws Exception {
@@ -77,7 +80,7 @@ public class GlobalSortingITCase extends RecordAPITestBase {
@Override
protected Plan getTestJob() {
GlobalSort globalSort = new GlobalSort();
- return globalSort.getPlan("4", recordsPath, resultPath);
+ return globalSort.getPlan(new Integer(DOP).toString(), recordsPath, resultPath);
}
@Override
[05/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildSecondHashMatchIterator.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildSecondHashMatchIterator.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildSecondHashMatchIterator.java
index f1525d8..732d256 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildSecondHashMatchIterator.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/BuildSecondHashMatchIterator.java
@@ -60,7 +60,7 @@ public final class BuildSecondHashMatchIterator<V1, V2, O> implements JoinTaskIt
TypeSerializer<V1> serializer1, TypeComparator<V1> comparator1,
TypeSerializer<V2> serializer2, TypeComparator<V2> comparator2,
TypePairComparator<V1, V2> pairComparator,
- MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, long totalMemory)
+ MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, double memoryFraction)
throws MemoryAllocationException
{
this.memManager = memManager;
@@ -73,7 +73,7 @@ public final class BuildSecondHashMatchIterator<V1, V2, O> implements JoinTaskIt
this.probeCopy = serializer1.createInstance();
this.hashJoin = getHashJoin(serializer2, comparator2, serializer1, comparator1, pairComparator,
- memManager, ioManager, ownerTask, totalMemory);
+ memManager, ioManager, ownerTask, memoryFraction);
}
// --------------------------------------------------------------------------------------------
@@ -149,10 +149,10 @@ public final class BuildSecondHashMatchIterator<V1, V2, O> implements JoinTaskIt
public <BT, PT> MutableHashTable<BT, PT> getHashJoin(TypeSerializer<BT> buildSideSerializer, TypeComparator<BT> buildSideComparator,
TypeSerializer<PT> probeSideSerializer, TypeComparator<PT> probeSideComparator,
TypePairComparator<PT, BT> pairComparator,
- MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, long totalMemory)
+ MemoryManager memManager, IOManager ioManager, AbstractInvokable ownerTask, double memoryFraction)
throws MemoryAllocationException
{
- final int numPages = memManager.computeNumberOfPages(totalMemory);
+ final int numPages = memManager.computeNumberOfPages(memoryFraction);
final List<MemorySegment> memorySegments = memManager.allocatePages(ownerTask, numPages);
return new MutableHashTable<BT, PT>(buildSideSerializer, probeSideSerializer, buildSideComparator, probeSideComparator, pairComparator, memorySegments, ioManager);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/InMemoryPartition.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/InMemoryPartition.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/InMemoryPartition.java
index 5587ad2..9cc4f49 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/InMemoryPartition.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/hash/InMemoryPartition.java
@@ -310,6 +310,7 @@ public class InMemoryPartition<T> {
return posInArray;
}
+ @SuppressWarnings("unused")
public void setSegmentNumberOffset(int offset) {
this.segmentNumberOffset = offset;
}
@@ -364,6 +365,7 @@ public class InMemoryPartition<T> {
seekInput(this.segments.get(bufferNum), offset, this.segmentSizeMask + 1);
}
+ @SuppressWarnings("unused")
public void setSegmentNumberOffset(int offset) {
this.segmentNumberOffset = offset;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationHeadPactTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationHeadPactTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationHeadPactTask.java
index 89571c4..b94e276 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationHeadPactTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationHeadPactTask.java
@@ -130,7 +130,8 @@ public class IterationHeadPactTask<X, Y, S extends Function, OT> extends Abstrac
private BlockingBackChannel initBackChannel() throws Exception {
/* get the size of the memory available to the backchannel */
- int backChannelMemoryPages = getMemoryManager().computeNumberOfPages(this.config.getBackChannelMemory());
+ int backChannelMemoryPages = getMemoryManager().computeNumberOfPages(this.config.getRelativeBackChannelMemory
+ ());
/* allocate the memory available to the backchannel */
List<MemorySegment> segments = new ArrayList<MemorySegment>();
@@ -150,7 +151,7 @@ public class IterationHeadPactTask<X, Y, S extends Function, OT> extends Abstrac
private <BT> CompactingHashTable<BT> initCompactingHashTable() throws Exception {
// get some memory
- long hashjoinMemorySize = config.getSolutionSetMemory();
+ double hashjoinMemorySize = config.getRelativeSolutionSetMemory();
TypeSerializerFactory<BT> solutionTypeSerializerFactory = config.getSolutionSetSerializer(userCodeClassLoader);
TypeComparatorFactory<BT> solutionTypeComparatorFactory = config.getSolutionSetComparator(userCodeClassLoader);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/shipping/ShipStrategyType.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/shipping/ShipStrategyType.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/shipping/ShipStrategyType.java
index a8a1293..ba38821 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/shipping/ShipStrategyType.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/shipping/ShipStrategyType.java
@@ -22,45 +22,42 @@ public enum ShipStrategyType {
/**
* Constant used as an indicator for an unassigned ship strategy.
*/
- NONE(false, false, false),
+ NONE(false, false),
/**
- * Forwarding the data preserving all global properties.
+ * Forwarding the data locally in memory.
*/
- FORWARD(false, false, false),
+ FORWARD(false, false),
/**
* Repartitioning the data randomly, typically when the degree of parallelism between two nodes changes.
*/
- PARTITION_RANDOM(true, true, false),
+ PARTITION_RANDOM(true, false),
/**
* Repartitioning the data deterministically through a hash function.
*/
- PARTITION_HASH(true, true, true),
+ PARTITION_HASH(true, true),
/**
* Partitioning the data in ranges according to a total order.
*/
- PARTITION_RANGE(true, true, true),
+ PARTITION_RANGE(true, true),
/**
* Replicating the data set to all instances.
*/
- BROADCAST(true, true, false);
+ BROADCAST(true, false);
// --------------------------------------------------------------------------------------------
private final boolean isNetwork;
- private final boolean compensatesForLocalDOPChanges;
-
private final boolean requiresComparator;
- private ShipStrategyType(boolean network, boolean compensatesForLocalDOPChanges, boolean requiresComparator) {
+ private ShipStrategyType(boolean network, boolean requiresComparator) {
this.isNetwork = network;
- this.compensatesForLocalDOPChanges = compensatesForLocalDOPChanges;
this.requiresComparator = requiresComparator;
}
@@ -68,10 +65,6 @@ public enum ShipStrategyType {
return this.isNetwork;
}
- public boolean compensatesForLocalDOPChanges() {
- return this.compensatesForLocalDOPChanges;
- }
-
public boolean requiresComparator() {
return this.requiresComparator;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorter.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorter.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorter.java
index 0cf6bb0..35377cf 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorter.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorter.java
@@ -50,7 +50,7 @@ public class AsynchronousPartialSorter<E> extends UnilateralSortMerger<E> {
* @param parentTask The parent task, which owns all resources used by this sorter.
* @param serializerFactory The type serializer.
* @param comparator The type comparator establishing the order relation.
- * @param totalMemory The total amount of memory dedicated to sorting.
+ * @param memoryFraction The fraction of memory dedicated to sorting.
*
* @throws IOException Thrown, if an error occurs initializing the resources for external sorting.
* @throws MemoryAllocationException Thrown, if not enough memory can be obtained from the memory manager to
@@ -59,12 +59,13 @@ public class AsynchronousPartialSorter<E> extends UnilateralSortMerger<E> {
public AsynchronousPartialSorter(MemoryManager memoryManager,
MutableObjectIterator<E> input, AbstractInvokable parentTask,
TypeSerializerFactory<E> serializerFactory, TypeComparator<E> comparator,
- long totalMemory)
+ double memoryFraction)
throws IOException, MemoryAllocationException
{
- super(memoryManager, null, input, parentTask, serializerFactory, comparator, totalMemory,
- totalMemory < 2 * MIN_NUM_SORT_MEM_SEGMENTS * memoryManager.getPageSize() ? 1 :
- Math.max((int) Math.ceil(((double) totalMemory) / MAX_MEM_PER_PARTIAL_SORT), 2),
+ super(memoryManager, null, input, parentTask, serializerFactory, comparator, memoryFraction,
+ memoryManager.computeNumberOfPages(memoryFraction) < 2 * MIN_NUM_SORT_MEM_SEGMENTS ? 1 :
+ Math.max((int) Math.ceil(((double) memoryManager.computeMemorySize(memoryFraction)) /
+ MAX_MEM_PER_PARTIAL_SORT), 2),
2, 0.0f, true);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorterCollector.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorterCollector.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorterCollector.java
index 9064ab9..747f98b 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorterCollector.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/AsynchronousPartialSorterCollector.java
@@ -45,7 +45,7 @@ public class AsynchronousPartialSorterCollector<E> extends AsynchronousPartialSo
* @param parentTask The parent task, which owns all resources used by this sorter.
* @param serializerFactory The type serializer.
* @param comparator The type comparator establishing the order relation.
- * @param totalMemory The total amount of memory dedicated to sorting.
+ * @param memoryFraction The fraction of memory dedicated to sorting.
*
* @throws IOException Thrown, if an error occurs initializing the resources for external sorting.
* @throws MemoryAllocationException Thrown, if not enough memory can be obtained from the memory manager to
@@ -54,10 +54,11 @@ public class AsynchronousPartialSorterCollector<E> extends AsynchronousPartialSo
public AsynchronousPartialSorterCollector(MemoryManager memoryManager,
AbstractInvokable parentTask,
TypeSerializerFactory<E> serializerFactory, TypeComparator<E> comparator,
- long totalMemory)
+ double memoryFraction)
throws IOException, MemoryAllocationException
{
- super(memoryManager, null, parentTask, serializerFactory, comparator, totalMemory);
+ super(memoryManager, null, parentTask, serializerFactory, comparator,
+ memoryFraction);
}
// ------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMerger.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMerger.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMerger.java
index 51d136c..9eb0452 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMerger.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMerger.java
@@ -95,12 +95,11 @@ public class CombiningUnilateralSortMerger<E> extends UnilateralSortMerger<E> {
* @param parentTask The parent task, which owns all resources used by this sorter.
* @param serializerFactory The type serializer.
* @param comparator The type comparator establishing the order relation.
- * @param totalMemory The total amount of memory dedicated to sorting, merging and I/O.
+ * @param memoryFraction The fraction of memory dedicated to sorting, merging and I/O.
* @param maxNumFileHandles The maximum number of files to be merged at once.
* @param startSpillingFraction The faction of the buffers that have to be filled before the spilling thread
* actually begins spilling data to disk.
- * @param combineLastMerge A flag indicating whether the last merge step applies the combiner as well.
- *
+ *
* @throws IOException Thrown, if an error occurs initializing the resources for external sorting.
* @throws MemoryAllocationException Thrown, if not enough memory can be obtained from the memory manager to
* perform the sort.
@@ -108,11 +107,11 @@ public class CombiningUnilateralSortMerger<E> extends UnilateralSortMerger<E> {
public CombiningUnilateralSortMerger(GenericCombine<E> combineStub, MemoryManager memoryManager, IOManager ioManager,
MutableObjectIterator<E> input, AbstractInvokable parentTask,
TypeSerializerFactory<E> serializerFactory, TypeComparator<E> comparator,
- long totalMemory, int maxNumFileHandles, float startSpillingFraction)
+ double memoryFraction, int maxNumFileHandles, float startSpillingFraction)
throws IOException, MemoryAllocationException
{
this(combineStub, memoryManager, ioManager, input, parentTask, serializerFactory, comparator,
- totalMemory, -1, maxNumFileHandles, startSpillingFraction);
+ memoryFraction, -1, maxNumFileHandles, startSpillingFraction);
}
/**
@@ -127,13 +126,12 @@ public class CombiningUnilateralSortMerger<E> extends UnilateralSortMerger<E> {
* @param parentTask The parent task, which owns all resources used by this sorter.
* @param serializerFactory The type serializer.
* @param comparator The type comparator establishing the order relation.
- * @param totalMemory The total amount of memory dedicated to sorting, merging and I/O.
+ * @param memoryFraction The fraction of memory dedicated to sorting, merging and I/O.
* @param numSortBuffers The number of distinct buffers to use for creation of the initial runs.
* @param maxNumFileHandles The maximum number of files to be merged at once.
* @param startSpillingFraction The fraction of the buffers that have to be filled before the spilling thread
* actually begins spilling data to disk.
- * @param combineLastMerge A flag indicating whether the last merge step applies the combiner as well.
- *
+ *
* @throws IOException Thrown, if an error occurs initializing the resources for external sorting.
* @throws MemoryAllocationException Thrown, if not enough memory can be obtained from the memory manager to
* perform the sort.
@@ -141,12 +139,12 @@ public class CombiningUnilateralSortMerger<E> extends UnilateralSortMerger<E> {
public CombiningUnilateralSortMerger(GenericCombine<E> combineStub, MemoryManager memoryManager, IOManager ioManager,
MutableObjectIterator<E> input, AbstractInvokable parentTask,
TypeSerializerFactory<E> serializerFactory, TypeComparator<E> comparator,
- long totalMemory, int numSortBuffers, int maxNumFileHandles,
+ double memoryFraction, int numSortBuffers, int maxNumFileHandles,
float startSpillingFraction)
throws IOException, MemoryAllocationException
{
super(memoryManager, ioManager, input, parentTask, serializerFactory, comparator,
- totalMemory, numSortBuffers, maxNumFileHandles, startSpillingFraction, false);
+ memoryFraction, numSortBuffers, maxNumFileHandles, startSpillingFraction, false);
this.combineStub = combineStub;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/UnilateralSortMerger.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/UnilateralSortMerger.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/UnilateralSortMerger.java
index 856ebf8..6905b85 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/UnilateralSortMerger.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/sort/UnilateralSortMerger.java
@@ -174,7 +174,7 @@ public class UnilateralSortMerger<E> implements Sorter<E> {
* @param parentTask The parent task, which owns all resources used by this sorter.
* @param serializerFactory The type serializer.
* @param comparator The type comparator establishing the order relation.
- * @param totalMemory The total amount of memory dedicated to sorting, merging and I/O.
+ * @param memoryFraction The fraction of memory dedicated to sorting, merging and I/O.
* @param maxNumFileHandles The maximum number of files to be merged at once.
* @param startSpillingFraction The fraction of the buffers that have to be filled before the spilling thread
* actually begins spilling data to disk.
@@ -186,11 +186,11 @@ public class UnilateralSortMerger<E> implements Sorter<E> {
public UnilateralSortMerger(MemoryManager memoryManager, IOManager ioManager,
MutableObjectIterator<E> input, AbstractInvokable parentTask,
TypeSerializerFactory<E> serializerFactory, TypeComparator<E> comparator,
- long totalMemory, int maxNumFileHandles, float startSpillingFraction)
+ double memoryFraction, int maxNumFileHandles, float startSpillingFraction)
throws IOException, MemoryAllocationException
{
this(memoryManager, ioManager, input, parentTask, serializerFactory, comparator,
- totalMemory, -1, maxNumFileHandles, startSpillingFraction);
+ memoryFraction, -1, maxNumFileHandles, startSpillingFraction);
}
/**
@@ -204,7 +204,7 @@ public class UnilateralSortMerger<E> implements Sorter<E> {
* @param parentTask The parent task, which owns all resources used by this sorter.
* @param serializerFactory The type serializer.
* @param comparator The type comparator establishing the order relation.
- * @param totalMemory The total amount of memory dedicated to sorting, merging and I/O.
+ * @param memoryFraction The fraction of memory dedicated to sorting, merging and I/O.
* @param numSortBuffers The number of distinct buffers to use for creation of the initial runs.
* @param maxNumFileHandles The maximum number of files to be merged at once.
* @param startSpillingFraction The fraction of the buffers that have to be filled before the spilling thread
@@ -217,12 +217,12 @@ public class UnilateralSortMerger<E> implements Sorter<E> {
public UnilateralSortMerger(MemoryManager memoryManager, IOManager ioManager,
MutableObjectIterator<E> input, AbstractInvokable parentTask,
TypeSerializerFactory<E> serializerFactory, TypeComparator<E> comparator,
- long totalMemory, int numSortBuffers, int maxNumFileHandles,
+ double memoryFraction, int numSortBuffers, int maxNumFileHandles,
float startSpillingFraction)
throws IOException, MemoryAllocationException
{
this(memoryManager, ioManager, input, parentTask, serializerFactory, comparator,
- totalMemory, numSortBuffers, maxNumFileHandles, startSpillingFraction, false);
+ memoryFraction, numSortBuffers, maxNumFileHandles, startSpillingFraction, false);
}
/**
@@ -234,7 +234,7 @@ public class UnilateralSortMerger<E> implements Sorter<E> {
* @param parentTask The parent task, which owns all resources used by this sorter.
* @param serializerFactory The type serializer.
* @param comparator The type comparator establishing the order relation.
- * @param totalMemory The total amount of memory dedicated to sorting, merging and I/O.
+ * @param memoryFraction The fraction of memory dedicated to sorting, merging and I/O.
* @param numSortBuffers The number of distinct buffers to use for creation of the initial runs.
* @param maxNumFileHandles The maximum number of files to be merged at once.
* @param startSpillingFraction The fraction of the buffers that have to be filled before the spilling thread
@@ -249,7 +249,7 @@ public class UnilateralSortMerger<E> implements Sorter<E> {
protected UnilateralSortMerger(MemoryManager memoryManager, IOManager ioManager,
MutableObjectIterator<E> input, AbstractInvokable parentTask,
TypeSerializerFactory<E> serializerFactory, TypeComparator<E> comparator,
- long totalMemory, int numSortBuffers, int maxNumFileHandles,
+ double memoryFraction, int numSortBuffers, int maxNumFileHandles,
float startSpillingFraction, boolean noSpillingMemory)
throws IOException, MemoryAllocationException
{
@@ -267,7 +267,7 @@ public class UnilateralSortMerger<E> implements Sorter<E> {
this.memoryManager = memoryManager;
// adjust the memory quotas to the page size
- final int numPagesTotal = memoryManager.computeNumberOfPages(totalMemory);
+ final int numPagesTotal = memoryManager.computeNumberOfPages(memoryFraction);
if (numPagesTotal < MIN_NUM_WRITE_BUFFERS + MIN_NUM_SORT_MEM_SEGMENTS) {
throw new IllegalArgumentException("Too little memory provided to sorter to perform task. " +
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/AbstractCachedBuildSideMatchDriver.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/AbstractCachedBuildSideMatchDriver.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/AbstractCachedBuildSideMatchDriver.java
index 66a4986..1d3c55d 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/AbstractCachedBuildSideMatchDriver.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/AbstractCachedBuildSideMatchDriver.java
@@ -67,7 +67,7 @@ public abstract class AbstractCachedBuildSideMatchDriver<IT1, IT2, OT> extends M
TypePairComparatorFactory<IT1, IT2> pairComparatorFactory =
this.taskContext.getTaskConfig().getPairComparatorFactory(this.taskContext.getUserCodeClassLoader());
- int numMemoryPages = this.taskContext.getMemoryManager().computeNumberOfPages(config.getMemoryDriver());
+ int numMemoryPages = this.taskContext.getMemoryManager().computeNumberOfPages(config.getRelativeMemoryDriver());
List<MemorySegment> memSegments = this.taskContext.getMemoryManager().allocatePages(
this.taskContext.getOwningNepheleTask(), numMemoryPages);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/CrossDriver.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/CrossDriver.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/CrossDriver.java
index 39f563d..181a687 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/CrossDriver.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/CrossDriver.java
@@ -116,8 +116,7 @@ public class CrossDriver<T1, T2, OT> implements PactDriver<GenericCrosser<T1, T2
}
this.memManager = this.taskContext.getMemoryManager();
- final long totalAvailableMemory = config.getMemoryDriver();
- final int numPages = this.memManager.computeNumberOfPages(totalAvailableMemory);
+ final int numPages = this.memManager.computeNumberOfPages(config.getRelativeMemoryDriver());
if (numPages < 2) {
throw new RuntimeException( "The Cross task was initialized with too little memory. " +
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
index 638a7aa..82359f5 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
@@ -138,7 +138,7 @@ public class DataSinkTask<IT> extends AbstractOutputTask {
getEnvironment().getMemoryManager(),
getEnvironment().getIOManager(),
this.reader, this, this.inputTypeSerializerFactory, compFact.createComparator(),
- this.config.getMemoryInput(0), this.config.getFilehandlesInput(0),
+ this.config.getRelativeMemoryInput(0), this.config.getFilehandlesInput(0),
this.config.getSpillingThresholdInput(0));
this.localStrategy = sorter;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/GroupReduceCombineDriver.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/GroupReduceCombineDriver.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/GroupReduceCombineDriver.java
index 0d51363..8cef403 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/GroupReduceCombineDriver.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/GroupReduceCombineDriver.java
@@ -79,8 +79,6 @@ public class GroupReduceCombineDriver<T> implements PactDriver<GenericCombine<T>
final TaskConfig config = this.taskContext.getTaskConfig();
final DriverStrategy ls = config.getDriverStrategy();
- final long availableMemory = config.getMemoryDriver();
-
final MemoryManager memoryManager = this.taskContext.getMemoryManager();
final MutableObjectIterator<T> in = this.taskContext.getInput(0);
@@ -90,7 +88,7 @@ public class GroupReduceCombineDriver<T> implements PactDriver<GenericCombine<T>
switch (ls) {
case SORTED_GROUP_COMBINE:
this.input = new AsynchronousPartialSorter<T>(memoryManager, in, this.taskContext.getOwningNepheleTask(),
- this.serializerFactory, this.comparator.duplicate(), availableMemory);
+ this.serializerFactory, this.comparator.duplicate(), config.getRelativeMemoryDriver());
break;
// obtain and return a grouped iterator from the combining sort-merger
default:
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/MatchDriver.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/MatchDriver.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/MatchDriver.java
index b356a58..a651894 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/MatchDriver.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/MatchDriver.java
@@ -83,8 +83,8 @@ public class MatchDriver<IT1, IT2, OT> implements PactDriver<GenericJoiner<IT1,
final IOManager ioManager = this.taskContext.getIOManager();
// set up memory and I/O parameters
- final long availableMemory = config.getMemoryDriver();
- final int numPages = memoryManager.computeNumberOfPages(availableMemory);
+ final double fractionAvailableMemory = config.getRelativeMemoryDriver();
+ final int numPages = memoryManager.computeNumberOfPages(fractionAvailableMemory);
// test minimum memory requirements
final DriverStrategy ls = config.getDriverStrategy();
@@ -106,23 +106,23 @@ public class MatchDriver<IT1, IT2, OT> implements PactDriver<GenericJoiner<IT1,
// create and return MatchTaskIterator according to provided local strategy.
switch (ls) {
- case MERGE:
- this.matchIterator = new MergeMatchIterator<IT1, IT2, OT>(in1, in2, serializer1, comparator1,
- serializer2, comparator2, pairComparatorFactory.createComparator12(comparator1, comparator2),
- memoryManager, ioManager, numPages, this.taskContext.getOwningNepheleTask());
- break;
- case HYBRIDHASH_BUILD_FIRST:
- this.matchIterator = new BuildFirstHashMatchIterator<IT1, IT2, OT>(in1, in2, serializer1, comparator1,
- serializer2, comparator2, pairComparatorFactory.createComparator21(comparator1, comparator2),
- memoryManager, ioManager, this.taskContext.getOwningNepheleTask(), availableMemory);
- break;
- case HYBRIDHASH_BUILD_SECOND:
- this.matchIterator = new BuildSecondHashMatchIterator<IT1, IT2, OT>(in1, in2, serializer1, comparator1,
- serializer2, comparator2, pairComparatorFactory.createComparator12(comparator1, comparator2),
- memoryManager, ioManager, this.taskContext.getOwningNepheleTask(), availableMemory);
- break;
- default:
- throw new Exception("Unsupported driver strategy for Match driver: " + ls.name());
+ case MERGE:
+ this.matchIterator = new MergeMatchIterator<IT1, IT2, OT>(in1, in2, serializer1, comparator1,
+ serializer2, comparator2, pairComparatorFactory.createComparator12(comparator1, comparator2),
+ memoryManager, ioManager, numPages, this.taskContext.getOwningNepheleTask());
+ break;
+ case HYBRIDHASH_BUILD_FIRST:
+ this.matchIterator = new BuildFirstHashMatchIterator<IT1, IT2, OT>(in1, in2, serializer1, comparator1,
+ serializer2, comparator2, pairComparatorFactory.createComparator21(comparator1, comparator2),
+ memoryManager, ioManager, this.taskContext.getOwningNepheleTask(), fractionAvailableMemory);
+ break;
+ case HYBRIDHASH_BUILD_SECOND:
+ this.matchIterator = new BuildSecondHashMatchIterator<IT1, IT2, OT>(in1, in2, serializer1, comparator1,
+ serializer2, comparator2, pairComparatorFactory.createComparator12(comparator1, comparator2),
+ memoryManager, ioManager, this.taskContext.getOwningNepheleTask(), fractionAvailableMemory);
+ break;
+ default:
+ throw new Exception("Unsupported driver strategy for Match driver: " + ls.name());
}
// open MatchTaskIterator - this triggers the sorting or hash-table building
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/ReduceCombineDriver.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/ReduceCombineDriver.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/ReduceCombineDriver.java
index 14310ca..2eaba54 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/ReduceCombineDriver.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/ReduceCombineDriver.java
@@ -99,7 +99,8 @@ public class ReduceCombineDriver<T> implements PactDriver<GenericReduce<T>, T> {
}
this.memManager = this.taskContext.getMemoryManager();
- final int numMemoryPages = memManager.computeNumberOfPages(this.taskContext.getTaskConfig().getMemoryDriver());
+ final int numMemoryPages = memManager.computeNumberOfPages(this.taskContext.getTaskConfig()
+ .getRelativeMemoryDriver());
// instantiate the serializer / comparator
final TypeSerializerFactory<T> serializerFactory = this.taskContext.getInputSerializer(0);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
index 92c4648..1d7c931 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
@@ -840,7 +840,7 @@ public class RegularPactTask<S extends Function, OT> extends AbstractTask implem
this.inputIsCached[i] = cached;
if (async || cached) {
- memoryPages = memMan.computeNumberOfPages(this.config.getInputMaterializationMemory(i));
+ memoryPages = memMan.computeNumberOfPages(this.config.getRelativeInputMaterializationMemory(i));
if (memoryPages <= 0) {
throw new Exception("Input marked as materialized/cached, but no memory for materialization provided.");
}
@@ -946,7 +946,7 @@ public class RegularPactTask<S extends Function, OT> extends AbstractTask implem
@SuppressWarnings({ "rawtypes", "unchecked" })
UnilateralSortMerger<?> sorter = new UnilateralSortMerger(getMemoryManager(), getIOManager(),
this.inputIterators[inputNum], this, this.inputSerializers[inputNum], getLocalStrategyComparator(inputNum),
- this.config.getMemoryInput(inputNum), this.config.getFilehandlesInput(inputNum),
+ this.config.getRelativeMemoryInput(inputNum), this.config.getFilehandlesInput(inputNum),
this.config.getSpillingThresholdInput(inputNum));
// set the input to null such that it will be lazily fetched from the input strategy
this.inputs[inputNum] = null;
@@ -982,7 +982,7 @@ public class RegularPactTask<S extends Function, OT> extends AbstractTask implem
CombiningUnilateralSortMerger<?> cSorter = new CombiningUnilateralSortMerger(
(GenericCombine) localStub, getMemoryManager(), getIOManager(), this.inputIterators[inputNum],
this, this.inputSerializers[inputNum], getLocalStrategyComparator(inputNum),
- this.config.getMemoryInput(inputNum), this.config.getFilehandlesInput(inputNum),
+ this.config.getRelativeMemoryInput(inputNum), this.config.getFilehandlesInput(inputNum),
this.config.getSpillingThresholdInput(inputNum));
cSorter.setUdfConfiguration(this.config.getStubParameters());
@@ -1022,12 +1022,6 @@ public class RegularPactTask<S extends Function, OT> extends AbstractTask implem
final MutableObjectIterator<?> iter = new ReaderIterator(reader, serializerFactory.getSerializer());
return iter;
}
-// // generic data type serialization
-// @SuppressWarnings("unchecked")
-// MutableReader<DeserializationDelegate<?>> reader = (MutableReader<DeserializationDelegate<?>>) inputReader;
-// @SuppressWarnings({ "unchecked", "rawtypes" })
-// final MutableObjectIterator<?> iter = new ReaderIterator(reader, serializer);
-// return iter;
}
protected int getNumTaskInputs() {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/chaining/SynchronousChainedCombineDriver.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/chaining/SynchronousChainedCombineDriver.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/chaining/SynchronousChainedCombineDriver.java
index 814eb62..98d65f1 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/chaining/SynchronousChainedCombineDriver.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/chaining/SynchronousChainedCombineDriver.java
@@ -81,7 +81,7 @@ public class SynchronousChainedCombineDriver<T> extends ChainedDriver<T, T> {
// ----------------- Set up the asynchronous sorter -------------------------
this.memManager = this.parent.getEnvironment().getMemoryManager();
- final int numMemoryPages = memManager.computeNumberOfPages(this.config.getMemoryDriver());
+ final int numMemoryPages = memManager.computeNumberOfPages(this.config.getRelativeMemoryDriver());
// instantiate the serializer / comparator
final TypeSerializerFactory<T> serializerFactory = this.config.getInputSerializer(0, this.userCodeClassLoader);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
index 947e22a..a43f8cc 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
@@ -454,12 +454,12 @@ public class TaskConfig {
return this.config.getBoolean(INPUT_REPLAYABLE_PREFIX + inputNum, false);
}
- public void setInputMaterializationMemory(int inputNum, long memory) {
- this.config.setLong(INPUT_DAM_MEMORY_PREFIX + inputNum, memory);
+ public void setRelativeInputMaterializationMemory(int inputNum, double relativeMemory) {
+ this.config.setDouble(INPUT_DAM_MEMORY_PREFIX + inputNum, relativeMemory);
}
- public long getInputMaterializationMemory(int inputNum) {
- return this.config.getLong(INPUT_DAM_MEMORY_PREFIX + inputNum, -1);
+ public double getRelativeInputMaterializationMemory(int inputNum) {
+ return this.config.getDouble(INPUT_DAM_MEMORY_PREFIX + inputNum, 0);
}
public void setBroadcastInputName(String name, int groupIndex) {
@@ -577,20 +577,20 @@ public class TaskConfig {
// Parameters to configure the memory and I/O behavior
// --------------------------------------------------------------------------------------------
- public void setMemoryDriver(long memorySize) {
- this.config.setLong(MEMORY_DRIVER, memorySize);
+ public void setRelativeMemoryDriver(double relativeMemorySize) {
+ this.config.setDouble(MEMORY_DRIVER, relativeMemorySize);
}
- public long getMemoryDriver() {
- return this.config.getLong(MEMORY_DRIVER, -1);
+ public double getRelativeMemoryDriver() {
+ return this.config.getDouble(MEMORY_DRIVER, 0);
}
- public void setMemoryInput(int inputNum, long memorySize) {
- this.config.setLong(MEMORY_INPUT_PREFIX + inputNum, memorySize);
+ public void setRelativeMemoryInput(int inputNum, double relativeMemorySize) {
+ this.config.setDouble(MEMORY_INPUT_PREFIX + inputNum, relativeMemorySize);
}
- public long getMemoryInput(int inputNum) {
- return this.config.getLong(MEMORY_INPUT_PREFIX + inputNum, -1);
+ public double getRelativeMemoryInput(int inputNum) {
+ return this.config.getDouble(MEMORY_INPUT_PREFIX + inputNum, 0);
}
// --------------------------------------------------------------------------------------------
@@ -732,30 +732,30 @@ public class TaskConfig {
return index;
}
- public void setBackChannelMemory(long memory) {
- if (memory < 0) {
+ public void setRelativeBackChannelMemory(double relativeMemory) {
+ if (relativeMemory < 0) {
throw new IllegalArgumentException();
}
- this.config.setLong(ITERATION_HEAD_BACKCHANNEL_MEMORY, memory);
+ this.config.setDouble(ITERATION_HEAD_BACKCHANNEL_MEMORY, relativeMemory);
}
- public long getBackChannelMemory() {
- long backChannelMemory = this.config.getLong(ITERATION_HEAD_BACKCHANNEL_MEMORY, 0);
- if (backChannelMemory <= 0) {
+ public double getRelativeBackChannelMemory() {
+ double relativeBackChannelMemory = this.config.getDouble(ITERATION_HEAD_BACKCHANNEL_MEMORY, 0);
+ if (relativeBackChannelMemory <= 0) {
throw new IllegalArgumentException();
}
- return backChannelMemory;
+ return relativeBackChannelMemory;
}
- public void setSolutionSetMemory(long memory) {
- if (memory < 0) {
+ public void setRelativeSolutionSetMemory(double relativeMemory) {
+ if (relativeMemory < 0) {
throw new IllegalArgumentException();
}
- this.config.setLong(ITERATION_HEAD_SOLUTION_SET_MEMORY, memory);
+ this.config.setDouble(ITERATION_HEAD_SOLUTION_SET_MEMORY, relativeMemory);
}
- public long getSolutionSetMemory() {
- long backChannelMemory = this.config.getLong(ITERATION_HEAD_SOLUTION_SET_MEMORY, 0);
+ public double getRelativeSolutionSetMemory() {
+ double backChannelMemory = this.config.getDouble(ITERATION_HEAD_SOLUTION_SET_MEMORY, 0);
if (backChannelMemory <= 0) {
throw new IllegalArgumentException();
}
@@ -1198,6 +1198,16 @@ public class TaskConfig {
public void setFloat(String key, float value) {
this.backingConfig.setFloat(this.prefix + key, value);
}
+
+ @Override
+ public double getDouble(String key, double defaultValue) {
+ return this.backingConfig.getDouble(this.prefix + key, defaultValue);
+ }
+
+ @Override
+ public void setDouble(String key, double value) {
+ this.backingConfig.setDouble(this.prefix + key, value);
+ }
@Override
public byte[] getBytes(final String key, final byte[] defaultValue) {
@@ -1220,16 +1230,6 @@ public class TaskConfig {
}
@Override
- public double getDouble(String key, double defaultValue) {
- return backingConfig.getDouble(this.prefix + key, defaultValue);
- }
-
- @Override
- public void setDouble(String key, double value) {
- backingConfig.setDouble(this.prefix + key, value);
- }
-
- @Override
public String toString() {
return backingConfig.toString();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/channels/InputChannel.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/channels/InputChannel.java b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/channels/InputChannel.java
index 6122c36..4109a2b 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/channels/InputChannel.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/channels/InputChannel.java
@@ -82,8 +82,6 @@ public class InputChannel<T extends IOReadableWritable> extends Channel implemen
private int lastReceivedEnvelope = -1;
- private ChannelID lastSourceID = null;
-
private boolean destroyCalled = false;
// ----------------------
@@ -157,10 +155,6 @@ public class InputChannel<T extends IOReadableWritable> extends Channel implemen
return this.inputGate.getJobID();
}
-// public abstract AbstractTaskEvent getCurrentEvent();
-
- private DeserializationResult lastDeserializationResult;
-
public InputChannelResult readRecord(T target) throws IOException {
if (this.dataBuffer == null) {
@@ -207,7 +201,6 @@ public class InputChannel<T extends IOReadableWritable> extends Channel implemen
}
DeserializationResult deserializationResult = this.deserializer.getNextRecord(target);
- this.lastDeserializationResult = deserializationResult;
if (deserializationResult.isBufferConsumed()) {
releasedConsumedReadBuffer(this.dataBuffer);
@@ -352,7 +345,6 @@ public class InputChannel<T extends IOReadableWritable> extends Channel implemen
this.queuedEnvelopes.add(envelope);
this.lastReceivedEnvelope = sequenceNumber;
- this.lastSourceID = envelope.getSource();
// Notify the channel about the new data. notify as much as there is (buffer plus once per event)
if (envelope.getBuffer() != null) {
@@ -464,6 +456,7 @@ public class InputChannel<T extends IOReadableWritable> extends Channel implemen
}
// schedule events as pending, because events come always after the buffer!
+ @SuppressWarnings("unchecked")
List<AbstractEvent> events = (List<AbstractEvent>) nextEnvelope.deserializeEvents();
Iterator<AbstractEvent> eventsIt = events.iterator();
if (eventsIt.hasNext()) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/gates/InputGate.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/gates/InputGate.java b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/gates/InputGate.java
index c623220..dc506ef 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/gates/InputGate.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/gates/InputGate.java
@@ -26,6 +26,7 @@ import eu.stratosphere.runtime.io.network.bufferprovider.BufferProvider;
import eu.stratosphere.runtime.io.network.bufferprovider.GlobalBufferPool;
import eu.stratosphere.runtime.io.network.bufferprovider.LocalBufferPool;
import eu.stratosphere.runtime.io.network.bufferprovider.LocalBufferPoolOwner;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -95,6 +96,7 @@ public class InputGate<T extends IOReadableWritable> extends Gate<T> implements
super(jobID, gateID, index);
}
+ @SuppressWarnings("unchecked")
public void initializeChannels(GateDeploymentDescriptor inputGateDescriptor){
channels = new InputChannel[inputGateDescriptor.getNumberOfChannelDescriptors()];
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/network/RemoteReceiver.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/network/RemoteReceiver.java b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/network/RemoteReceiver.java
index da36ad0..ab65b4c 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/network/RemoteReceiver.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/network/RemoteReceiver.java
@@ -18,10 +18,8 @@ import java.io.DataOutput;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
import eu.stratosphere.core.io.IOReadableWritable;
-import eu.stratosphere.util.StringUtils;
/**
* Objects of this class uniquely identify a connection to a remote {@link TaskManager}.
@@ -48,11 +46,9 @@ public final class RemoteReceiver implements IOReadableWritable {
* the index of the connection to the remote {@link TaskManager}
*/
public RemoteReceiver(final InetSocketAddress connectionAddress, final int connectionIndex) {
-
if (connectionAddress == null) {
throw new IllegalArgumentException("Argument connectionAddress must not be null");
}
-
if (connectionIndex < 0) {
throw new IllegalArgumentException("Argument connectionIndex must be a non-negative integer number");
}
@@ -75,7 +71,6 @@ public final class RemoteReceiver implements IOReadableWritable {
* @return the address of the connection to the remote {@link TaskManager}
*/
public InetSocketAddress getConnectionAddress() {
-
return this.connectionAddress;
}
@@ -85,14 +80,12 @@ public final class RemoteReceiver implements IOReadableWritable {
* @return the index of the connection to the remote {@link TaskManager}
*/
public int getConnectionIndex() {
-
return this.connectionIndex;
}
@Override
public int hashCode() {
-
return this.connectionAddress.hashCode() + (31 * this.connectionIndex);
}
@@ -131,18 +124,12 @@ public final class RemoteReceiver implements IOReadableWritable {
@Override
public void read(final DataInput in) throws IOException {
-
final int addr_length = in.readInt();
final byte[] address = new byte[addr_length];
in.readFully(address);
- InetAddress ia = null;
- try {
- ia = InetAddress.getByAddress(address);
- } catch (UnknownHostException uhe) {
- throw new IOException(StringUtils.stringifyException(uhe));
- }
- final int port = in.readInt();
+ InetAddress ia = InetAddress.getByAddress(address);
+ int port = in.readInt();
this.connectionAddress = new InetSocketAddress(ia, port);
this.connectionIndex = in.readInt();
@@ -151,7 +138,6 @@ public final class RemoteReceiver implements IOReadableWritable {
@Override
public String toString() {
-
return this.connectionAddress + " (" + this.connectionIndex + ")";
}
-}
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/event/job/ManagementEventTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/event/job/ManagementEventTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/event/job/ManagementEventTest.java
index a6a5875..fd1a672 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/event/job/ManagementEventTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/event/job/ManagementEventTest.java
@@ -85,14 +85,12 @@ public class ManagementEventTest {
@Test
public void testVertexAssignmentEvent() {
- final VertexAssignmentEvent orig = new VertexAssignmentEvent(TIMESTAMP, new ManagementVertexID(), "test",
- "standard");
+ final VertexAssignmentEvent orig = new VertexAssignmentEvent(TIMESTAMP, new ManagementVertexID(), "test");
final VertexAssignmentEvent copy = (VertexAssignmentEvent) ManagementTestUtils.createCopy(orig);
assertEquals(orig.getVertexID(), copy.getVertexID());
assertEquals(orig.getTimestamp(), copy.getTimestamp());
assertEquals(orig.getInstanceName(), copy.getInstanceName());
- assertEquals(orig.getInstanceType(), copy.getInstanceType());
assertEquals(orig.hashCode(), copy.hashCode());
assertTrue(orig.equals(copy));
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
index 5ff5f1c..fa0653b 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
@@ -23,40 +23,25 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
import org.apache.log4j.Level;
import org.junit.BeforeClass;
import org.junit.Test;
-import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.fs.Path;
import eu.stratosphere.nephele.execution.ExecutionState;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
import eu.stratosphere.nephele.jobgraph.DistributionPattern;
-import eu.stratosphere.runtime.io.channels.ChannelType;
-import eu.stratosphere.nephele.util.FileLineReader;
-import eu.stratosphere.nephele.util.FileLineWriter;
import eu.stratosphere.nephele.jobgraph.JobFileInputVertex;
import eu.stratosphere.nephele.jobgraph.JobFileOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.jobgraph.JobTaskVertex;
-import eu.stratosphere.nephele.topology.NetworkTopology;
+import eu.stratosphere.nephele.util.FileLineReader;
+import eu.stratosphere.nephele.util.FileLineWriter;
import eu.stratosphere.nephele.util.ServerTestUtils;
+import eu.stratosphere.runtime.io.channels.ChannelType;
import eu.stratosphere.util.LogUtils;
/**
@@ -64,152 +49,6 @@ import eu.stratosphere.util.LogUtils;
*
*/
public class ExecutionGraphTest {
-
- /**
- * The name of the default instance type used during these tests.
- */
- private static final String DEFAULT_INSTANCE_TYPE_NAME = "test";
-
- /**
- * A test implementation of an {@link InstanceManager} which is used as a stub in these tests.
- *
- */
- private static final class TestInstanceManager implements InstanceManager {
-
- /**
- * The default instance type.
- */
- private final InstanceType defaultInstanceType;
-
- /**
- * Constructs a new test instance manager.
- */
- public TestInstanceManager() {
- this.defaultInstanceType = InstanceTypeFactory.construct(DEFAULT_INSTANCE_TYPE_NAME, 4, 4, 1024, 50, 10);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void requestInstance(final JobID jobID, final Configuration conf,
- final InstanceRequestMap instanceRequestMap,
- final List<String> splitAffinityList) throws InstanceException {
-
- throw new IllegalStateException("requestInstance called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void releaseAllocatedResource(final JobID jobID, final Configuration conf,
- final AllocatedResource allocatedResource)
- throws InstanceException {
-
- throw new IllegalStateException("releaseAllocatedResource called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public InstanceType getSuitableInstanceType(final int minNumComputeUnits, final int minNumCPUCores,
- final int minMemorySize, final int minDiskCapacity, final int maxPricePerHour) {
-
- throw new IllegalStateException("getSuitableInstanceType called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void reportHeartBeat(final InstanceConnectionInfo instanceConnectionInfo,
- final HardwareDescription hardwareDescription) {
-
- throw new IllegalStateException("reportHeartBeat called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public InstanceType getInstanceTypeByName(final String instanceTypeName) {
-
- if (this.defaultInstanceType.getIdentifier().equals(instanceTypeName)) {
- return this.defaultInstanceType;
- }
-
- return null;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public InstanceType getDefaultInstanceType() {
-
- return this.defaultInstanceType;
- }
-
- @Override
- public NetworkTopology getNetworkTopology(final JobID jobID) {
-
- throw new IllegalStateException("getNetworkTopology called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void setInstanceListener(final InstanceListener instanceListener) {
-
- throw new IllegalStateException("setInstanceListener called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes() {
-
- throw new IllegalStateException("getMapOfAvailableInstanceType called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void shutdown() {
-
- throw new IllegalStateException("shutdown called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public AbstractInstance getInstanceByName(final String name) {
- throw new IllegalStateException("getInstanceByName called on TestInstanceManager");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void cancelPendingRequests(final JobID jobID) {
- throw new IllegalStateException("cancelPendingRequests called on TestInstanceManager");
- }
-
- @Override
- public int getNumberOfTaskTrackers() {
- return 0;
- }
-
- }
-
- private static final InstanceManager INSTANCE_MANAGER = new TestInstanceManager();
-
@BeforeClass
public static void reduceLogLevel() {
LogUtils.initializeDefaultConsoleLogger(Level.WARN);
@@ -259,15 +98,11 @@ public class ExecutionGraphTest {
LibraryCacheManager.register(jobID, new String[0]);
- final ExecutionGraph eg = new ExecutionGraph(jg, INSTANCE_MANAGER);
+ final ExecutionGraph eg = new ExecutionGraph(jg, -1);
// test all methods of ExecutionGraph
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
final ExecutionStage executionStage = eg.getCurrentExecutionStage();
- executionStage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
- assertEquals(1, instanceRequestMap.size());
- assertEquals(1, (int) instanceRequestMap.getMaximumNumberOfInstances(INSTANCE_MANAGER
- .getInstanceTypeByName(DEFAULT_INSTANCE_TYPE_NAME)));
+ assertEquals(1, executionStage.getMaxNumberSubtasks());
assertEquals(jobID, eg.getJobID());
assertEquals(0, eg.getIndexOfCurrentExecutionStage());
@@ -332,15 +167,11 @@ public class ExecutionGraphTest {
assertNotNull(egv0.getGroupMember(0));
assertNull(egv0.getGroupMember(1));
assertEquals(1, egv0.getInputSplits().length);
- assertEquals(-1, egv0.getMaximumNumberOfGroupMembers());
- assertEquals(1, egv0.getMinimumNumberOfGroupMember());
assertEquals("Input 1", egv0.getName());
assertEquals(0, egv0.getNumberOfBackwardLinks());
assertEquals(1, egv0.getNumberOfForwardLinks());
- assertEquals(1, egv0.getNumberOfSubtasksPerInstance());
assertEquals(0, egv0.getStageNumber());
assertEquals(-1, egv0.getUserDefinedNumberOfMembers());
- assertEquals(INSTANCE_MANAGER.getDefaultInstanceType(), egv0.getInstanceType());
assertEquals("Task 1", egv0.getVertexToShareInstancesWith().getName());
// egv1 (output1)
@@ -354,15 +185,11 @@ public class ExecutionGraphTest {
assertNull(egv1.getForwardEdge(0));
assertNotNull(egv1.getGroupMember(0));
assertNull(egv1.getGroupMember(1));
- assertEquals(1, egv1.getMaximumNumberOfGroupMembers());
- assertEquals(1, egv1.getMinimumNumberOfGroupMember());
assertEquals("Output 1", egv1.getName());
assertEquals(1, egv1.getNumberOfBackwardLinks());
assertEquals(0, egv1.getNumberOfForwardLinks());
- assertEquals(1, egv1.getNumberOfSubtasksPerInstance());
assertEquals(0, egv1.getStageNumber());
assertEquals(-1, egv1.getUserDefinedNumberOfMembers());
- assertEquals(INSTANCE_MANAGER.getInstanceTypeByName(DEFAULT_INSTANCE_TYPE_NAME), egv1.getInstanceType());
assertEquals("Input 1", egv1.getVertexToShareInstancesWith().getName());
// egv2 (task1)
@@ -378,15 +205,11 @@ public class ExecutionGraphTest {
assertNotNull(egv2.getForwardEdges(egv1));
assertNotNull(egv2.getGroupMember(0));
assertNull(egv2.getGroupMember(1));
- assertEquals(-1, egv2.getMaximumNumberOfGroupMembers());
- assertEquals(1, egv2.getMinimumNumberOfGroupMember());
assertEquals("Task 1", egv2.getName());
assertEquals(1, egv2.getNumberOfBackwardLinks());
assertEquals(1, egv2.getNumberOfForwardLinks());
- assertEquals(1, egv2.getNumberOfSubtasksPerInstance());
assertEquals(0, egv2.getStageNumber());
assertEquals(-1, egv2.getUserDefinedNumberOfMembers());
- assertEquals(INSTANCE_MANAGER.getInstanceTypeByName(DEFAULT_INSTANCE_TYPE_NAME), egv2.getInstanceType());
assertNull(egv2.getVertexToShareInstancesWith());
// test all methods of ExecutionVertex
@@ -398,25 +221,16 @@ public class ExecutionGraphTest {
assertEquals(egv0, ev0.getGroupVertex());
assertNotNull(ev0.getID());
assertEquals("Input 1", ev0.getName());
- assertEquals(INSTANCE_MANAGER.getInstanceTypeByName(DEFAULT_INSTANCE_TYPE_NAME), ev0.getAllocatedResource()
- .getInstance()
- .getType());
// ev1 (output1)
assertEquals(egv1, ev1.getGroupVertex());
assertNotNull(ev1.getID());
assertEquals("Output 1", ev1.getName());
- assertEquals(INSTANCE_MANAGER.getInstanceTypeByName(DEFAULT_INSTANCE_TYPE_NAME), ev1.getAllocatedResource()
- .getInstance()
- .getType());
// ev2 (task1)
assertEquals(egv2, ev2.getGroupVertex());
assertNotNull(ev2.getID());
assertEquals("Task 1", ev2.getName());
- assertEquals(INSTANCE_MANAGER.getInstanceTypeByName(DEFAULT_INSTANCE_TYPE_NAME), ev2.getAllocatedResource()
- .getInstance()
- .getType());
assertEquals(ev0.getAllocatedResource(), ev1.getAllocatedResource());
assertEquals(ev0.getAllocatedResource(), ev2.getAllocatedResource());
@@ -448,7 +262,7 @@ public class ExecutionGraphTest {
* input1 -> task1 -> output1
* no subtasks defined
* input1 is default, task1 is m1.large, output1 is m1.xlarge
- * all channels are IN_MEMORY
+ * all channels are INMEMORY
*/
@Test
public void testConvertJobGraphToExecutionGraph2() {
@@ -484,15 +298,11 @@ public class ExecutionGraphTest {
LibraryCacheManager.register(jobID, new String[0]);
// now convert job graph to execution graph
- final ExecutionGraph eg = new ExecutionGraph(jg, INSTANCE_MANAGER);
+ final ExecutionGraph eg = new ExecutionGraph(jg, 1);
// test instance types in ExecutionGraph
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
final ExecutionStage executionStage = eg.getCurrentExecutionStage();
- executionStage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
- assertEquals(1, instanceRequestMap.size());
- assertEquals(1,
- (int) instanceRequestMap.getMaximumNumberOfInstances(INSTANCE_MANAGER.getDefaultInstanceType()));
+ assertEquals(1, executionStage.getMaxNumberSubtasks());
// stage0
ExecutionStage es = eg.getStage(0);
@@ -523,12 +333,6 @@ public class ExecutionGraphTest {
ExecutionVertex ev0 = egv0.getGroupMember(0); // input1
ExecutionVertex ev1 = egv1.getGroupMember(0); // output1
ExecutionVertex ev2 = egv2.getGroupMember(0); // task1
- // ev0 (input1)
- assertEquals(INSTANCE_MANAGER.getDefaultInstanceType(), ev0.getAllocatedResource().getInstance().getType());
- // ev1 (output1)
- assertEquals(INSTANCE_MANAGER.getDefaultInstanceType(), ev1.getAllocatedResource().getInstance().getType());
- // ev2 (task1)
- assertEquals(INSTANCE_MANAGER.getDefaultInstanceType(), ev2.getAllocatedResource().getInstance().getType());
assertEquals(ev0.getAllocatedResource(), ev1.getAllocatedResource());
assertEquals(ev0.getAllocatedResource(), ev2.getAllocatedResource());
} catch (GraphConversionException e) {
@@ -618,15 +422,11 @@ public class ExecutionGraphTest {
LibraryCacheManager.register(jobID, new String[0]);
- final ExecutionGraph eg = new ExecutionGraph(jg, INSTANCE_MANAGER);
+ final ExecutionGraph eg = new ExecutionGraph(jg, 1);
// test instance types in ExecutionGraph
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
final ExecutionStage executionStage = eg.getCurrentExecutionStage();
- executionStage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
- assertEquals(1, instanceRequestMap.size());
- assertEquals(2,
- (int) instanceRequestMap.getMaximumNumberOfInstances(INSTANCE_MANAGER.getDefaultInstanceType()));
+ assertEquals(2, executionStage.getMaxNumberSubtasks());
// stage0
final ExecutionStage es = eg.getStage(0);
@@ -828,40 +628,32 @@ public class ExecutionGraphTest {
i1.setFileInputClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile1.toURI()));
i1.setNumberOfSubtasks(4);
- i1.setNumberOfSubtasksPerInstance(2);
final JobFileInputVertex i2 = new JobFileInputVertex("Input 2", jg);
i2.setFileInputClass(FileLineReader.class);
i2.setFilePath(new Path(inputFile2.toURI()));
i2.setNumberOfSubtasks(4);
- i2.setNumberOfSubtasksPerInstance(2);
// task vertex
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
t1.setTaskClass(ForwardTask1Input1Output.class);
t1.setNumberOfSubtasks(4);
- t1.setNumberOfSubtasksPerInstance(2);
final JobTaskVertex t2 = new JobTaskVertex("Task 2", jg);
t2.setTaskClass(ForwardTask1Input1Output.class);
t2.setNumberOfSubtasks(4);
- t2.setNumberOfSubtasksPerInstance(2);
final JobTaskVertex t3 = new JobTaskVertex("Task 3", jg);
t3.setTaskClass(ForwardTask2Inputs1Output.class);
t3.setNumberOfSubtasks(8);
- t3.setNumberOfSubtasksPerInstance(4);
final JobTaskVertex t4 = new JobTaskVertex("Task 4", jg);
t4.setTaskClass(ForwardTask1Input2Outputs.class);
t4.setNumberOfSubtasks(8);
- t4.setNumberOfSubtasksPerInstance(4);
// output vertex
final JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
o1.setFileOutputClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile1.toURI()));
o1.setNumberOfSubtasks(4);
- o1.setNumberOfSubtasksPerInstance(2);
final JobFileOutputVertex o2 = new JobFileOutputVertex("Output 2", jg);
o2.setFileOutputClass(FileLineWriter.class);
o2.setFilePath(new Path(outputFile2.toURI()));
o2.setNumberOfSubtasks(4);
- o2.setNumberOfSubtasksPerInstance(2);
o1.setVertexToShareInstancesWith(o2);
// connect vertices
@@ -876,19 +668,14 @@ public class ExecutionGraphTest {
LibraryCacheManager.register(jobID, new String[0]);
// now convert job graph to execution graph
- final ExecutionGraph eg = new ExecutionGraph(jg, INSTANCE_MANAGER);
+ final ExecutionGraph eg = new ExecutionGraph(jg, 1);
// test instance types in ExecutionGraph
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
ExecutionStage executionStage = eg.getCurrentExecutionStage();
assertNotNull(executionStage);
assertEquals(0, executionStage.getStageNumber());
- executionStage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
- assertEquals(1, instanceRequestMap.size());
- assertEquals(8,
- (int) instanceRequestMap.getMaximumNumberOfInstances(INSTANCE_MANAGER
- .getInstanceTypeByName(DEFAULT_INSTANCE_TYPE_NAME)));
+ assertEquals(20, executionStage.getRequiredSlots());
// Fake transition to next stage by triggering execution state changes manually
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(eg, eg.getIndexOfCurrentExecutionStage(),
true, true);
@@ -903,7 +690,6 @@ public class ExecutionGraphTest {
ev.updateExecutionState(ExecutionState.FINISHING);
ev.updateExecutionState(ExecutionState.FINISHED);
}
- instanceRequestMap.clear();
} catch (GraphConversionException e) {
fail(e.getMessage());
} catch (JobGraphDefinitionException e) {
@@ -983,7 +769,7 @@ public class ExecutionGraphTest {
LibraryCacheManager.register(jobID, new String[0]);
// now convert job graph to execution graph
- final ExecutionGraph eg = new ExecutionGraph(jg, INSTANCE_MANAGER);
+ final ExecutionGraph eg = new ExecutionGraph(jg, 1);
assertEquals(1, eg.getNumberOfStages());
@@ -1116,9 +902,9 @@ public class ExecutionGraphTest {
input1.connectTo(forward1, ChannelType.IN_MEMORY,
DistributionPattern.POINTWISE);
forward1.connectTo(forward2, ChannelType.IN_MEMORY,
- DistributionPattern.POINTWISE);
+ DistributionPattern.POINTWISE);
forward2.connectTo(forward3, ChannelType.NETWORK,
- DistributionPattern.POINTWISE);
+ DistributionPattern.POINTWISE);
forward3.connectTo(output1, ChannelType.IN_MEMORY);
// setup instance sharing
@@ -1130,7 +916,7 @@ public class ExecutionGraphTest {
LibraryCacheManager.register(jobID, new String[0]);
// now convert job graph to execution graph
- final ExecutionGraph eg = new ExecutionGraph(jg, INSTANCE_MANAGER);
+ final ExecutionGraph eg = new ExecutionGraph(jg, 1);
// Check number of stages
assertEquals(1, eg.getNumberOfStages());
@@ -1139,16 +925,8 @@ public class ExecutionGraphTest {
final ExecutionStage stage = eg.getStage(0);
assertEquals(5, stage.getNumberOfStageMembers());
- // Check number of required instances
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
- stage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
-
- // First, we expect all required instances to be of the same type
- assertEquals(1, instanceRequestMap.size());
-
- final int numberOfRequiredInstances = instanceRequestMap.getMinimumNumberOfInstances(INSTANCE_MANAGER
- .getDefaultInstanceType());
- assertEquals(degreeOfParallelism, numberOfRequiredInstances);
+ final int numberOfRequiredSlots = stage.getMaxNumberSubtasks();
+ assertEquals(degreeOfParallelism, numberOfRequiredSlots);
} catch (GraphConversionException e) {
fail(e.getMessage());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTest.java
deleted file mode 100644
index 72d58c9..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTest.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.net.InetAddress;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import eu.stratosphere.configuration.ConfigConstants;
-import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.configuration.GlobalConfiguration;
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.AllocationID;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.HardwareDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.util.LogUtils;
-
-/**
- * Tests for {@link ClusterManager}.
- */
-public class ClusterManagerTest {
-
- @BeforeClass
- public static void initLogging() {
- LogUtils.initializeDefaultTestConsoleLogger();
- }
-
-
- @Test
- public void testInstanceRegistering() {
- try {
- ClusterManager cm = new ClusterManager();
- TestInstanceListener testInstanceListener = new TestInstanceListener();
- cm.setInstanceListener(testInstanceListener);
-
-
- int ipcPort = ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT;
- int dataPort = ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT;
-
- HardwareDescription hardwareDescription = HardwareDescriptionFactory.construct(2, 2L * 1024L * 1024L * 1024L,
- 2L * 1024L * 1024L * 1024L);
-
- String hostname = "192.168.198.1";
- InetAddress address = InetAddress.getByName("192.168.198.1");
-
- InstanceConnectionInfo ici1 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 0, dataPort + 0);
- InstanceConnectionInfo ici2 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 15, dataPort + 15);
- InstanceConnectionInfo ici3 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 30, dataPort + 30);
-
- // register three instances
- cm.reportHeartBeat(ici1, hardwareDescription);
- cm.reportHeartBeat(ici2, hardwareDescription);
- cm.reportHeartBeat(ici3, hardwareDescription);
-
-
- Map<InstanceType, InstanceTypeDescription> instanceTypeDescriptions = cm.getMapOfAvailableInstanceTypes();
- assertEquals(1, instanceTypeDescriptions.size());
-
- InstanceTypeDescription descr = instanceTypeDescriptions.entrySet().iterator().next().getValue();
-
- assertEquals(3, descr.getMaximumNumberOfAvailableInstances());
-
- cm.shutdown();
- }
- catch (Exception e) {
- System.err.println(e.getMessage());
- e.printStackTrace();
- Assert.fail("Test erroneous: " + e.getMessage());
- }
- }
-
- @Test
- public void testAllocationDeallocation() {
- try {
- ClusterManager cm = new ClusterManager();
- TestInstanceListener testInstanceListener = new TestInstanceListener();
- cm.setInstanceListener(testInstanceListener);
-
-
- int ipcPort = ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT;
- int dataPort = ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT;
-
- HardwareDescription hardwareDescription = HardwareDescriptionFactory.construct(2, 2L * 1024L * 1024L * 1024L,
- 2L * 1024L * 1024L * 1024L);
-
- String hostname = "192.168.198.1";
- InetAddress address = InetAddress.getByName("192.168.198.1");
-
- InstanceConnectionInfo ici1 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 0, dataPort + 0);
- InstanceConnectionInfo ici2 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 15, dataPort + 15);
-
- // register three instances
- cm.reportHeartBeat(ici1, hardwareDescription);
- cm.reportHeartBeat(ici2, hardwareDescription);
-
-
- Map<InstanceType, InstanceTypeDescription> instanceTypeDescriptions = cm.getMapOfAvailableInstanceTypes();
- assertEquals(1, instanceTypeDescriptions.size());
-
- InstanceTypeDescription descr = instanceTypeDescriptions.entrySet().iterator().next().getValue();
-
- assertEquals(2, descr.getMaximumNumberOfAvailableInstances());
-
-
- // allocate something
- JobID jobID = new JobID();
- Configuration conf = new Configuration();
- InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
- instanceRequestMap.setNumberOfInstances(cm.getDefaultInstanceType(), 2);
- cm.requestInstance(jobID, conf, instanceRequestMap, null);
-
- ClusterManagerTestUtils.waitForInstances(jobID, testInstanceListener, 3, 1000);
-
- List<AllocatedResource> allocatedResources = testInstanceListener.getAllocatedResourcesForJob(jobID);
- assertEquals(2, allocatedResources.size());
-
- Iterator<AllocatedResource> it = allocatedResources.iterator();
- Set<AllocationID> allocationIDs = new HashSet<AllocationID>();
- while (it.hasNext()) {
- AllocatedResource allocatedResource = it.next();
- if (ConfigConstants.DEFAULT_INSTANCE_TYPE.equals(allocatedResource.getInstance().getType().getIdentifier())) {
- fail("Allocated unexpected instance of type "
- + allocatedResource.getInstance().getType().getIdentifier());
- }
-
- if (allocationIDs.contains(allocatedResource.getAllocationID())) {
- fail("Discovered allocation ID " + allocatedResource.getAllocationID() + " at least twice");
- } else {
- allocationIDs.add(allocatedResource.getAllocationID());
- }
- }
-
- // Try to allocate more resources which must result in an error
- try {
- InstanceRequestMap instancem = new InstanceRequestMap();
- instancem.setNumberOfInstances(cm.getDefaultInstanceType(), 1);
- cm.requestInstance(jobID, conf, instancem, null);
-
- fail("ClusterManager allowed to request more instances than actually available");
-
- } catch (InstanceException ie) {
- // Exception is expected and correct behavior here
- }
-
- // Release all allocated resources
- it = allocatedResources.iterator();
- while (it.hasNext()) {
- final AllocatedResource allocatedResource = it.next();
- cm.releaseAllocatedResource(jobID, conf, allocatedResource);
- }
-
- // Now further allocations should be possible
-
- InstanceRequestMap instancem = new InstanceRequestMap();
- instancem.setNumberOfInstances(cm.getDefaultInstanceType(), 1);
- cm.requestInstance(jobID, conf, instancem, null);
-
-
- cm.shutdown();
- }
- catch (Exception e) {
- System.err.println(e.getMessage());
- e.printStackTrace();
- Assert.fail("Test erroneous: " + e.getMessage());
- }
- }
-
- /**
- * This test checks the clean-up routines of the cluster manager.
- */
- @Test
- public void testCleanUp() {
- try {
-
- final int CLEANUP_INTERVAL = 2;
-
- // configure a short cleanup interval
- Configuration config = new Configuration();
- config.setInteger("instancemanager.cluster.cleanupinterval", CLEANUP_INTERVAL);
- GlobalConfiguration.includeConfiguration(config);
-
- ClusterManager cm = new ClusterManager();
- TestInstanceListener testInstanceListener = new TestInstanceListener();
- cm.setInstanceListener(testInstanceListener);
-
-
- int ipcPort = ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT;
- int dataPort = ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT;
-
- HardwareDescription hardwareDescription = HardwareDescriptionFactory.construct(2, 2L * 1024L * 1024L * 1024L,
- 2L * 1024L * 1024L * 1024L);
-
- String hostname = "192.168.198.1";
- InetAddress address = InetAddress.getByName("192.168.198.1");
-
- InstanceConnectionInfo ici1 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 0, dataPort + 0);
- InstanceConnectionInfo ici2 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 15, dataPort + 15);
- InstanceConnectionInfo ici3 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 30, dataPort + 30);
-
- // register three instances
- cm.reportHeartBeat(ici1, hardwareDescription);
- cm.reportHeartBeat(ici2, hardwareDescription);
- cm.reportHeartBeat(ici3, hardwareDescription);
-
-
- Map<InstanceType, InstanceTypeDescription> instanceTypeDescriptions = cm.getMapOfAvailableInstanceTypes();
- assertEquals(1, instanceTypeDescriptions.size());
-
- InstanceTypeDescription descr = instanceTypeDescriptions.entrySet().iterator().next().getValue();
- assertEquals(3, descr.getMaximumNumberOfAvailableInstances());
-
- // request some instances
- JobID jobID = new JobID();
- Configuration conf = new Configuration();
-
- InstanceRequestMap instancem = new InstanceRequestMap();
- instancem.setNumberOfInstances(cm.getDefaultInstanceType(), 1);
- cm.requestInstance(jobID, conf, instancem, null);
-
- ClusterManagerTestUtils.waitForInstances(jobID, testInstanceListener, 1, 1000);
- assertEquals(1, testInstanceListener.getNumberOfAllocatedResourcesForJob(jobID));
-
- // wait for the cleanup to kick in
- Thread.sleep(2000 * CLEANUP_INTERVAL);
-
- // check that the instances are gone
- ClusterManagerTestUtils.waitForInstances(jobID, testInstanceListener, 0, 1000);
- assertEquals(0, testInstanceListener.getNumberOfAllocatedResourcesForJob(jobID));
-
-
- instanceTypeDescriptions = cm.getMapOfAvailableInstanceTypes();
- assertEquals(1, instanceTypeDescriptions.size());
-
- descr = instanceTypeDescriptions.entrySet().iterator().next().getValue();
- assertEquals(0, descr.getMaximumNumberOfAvailableInstances());
-
- cm.shutdown();
- }
- catch (Exception e) {
- System.err.println(e.getMessage());
- e.printStackTrace();
- Assert.fail("Test erroneous: " + e.getMessage());
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTestUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTestUtils.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTestUtils.java
deleted file mode 100644
index e311533..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/ClusterManagerTestUtils.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.jobgraph.JobID;
-
-/**
- * This class contains utility methods used during the tests of the {@link ClusterManager} implementation.
- *
- */
-public class ClusterManagerTestUtils {
-
- /**
- * Granularity of the sleep time.
- */
- private static final long SLEEP_TIME = 10; // 10 milliseconds
-
- /**
- * Private constructor so the class cannot be instantiated.
- */
- private ClusterManagerTestUtils() {
- }
-
- /**
- * Waits until a specific number of instances have registered or deregistrations with the given
- * {@link InstanceListener} object for a given job or the maximum wait time has elapsed.
- *
- * @param jobID
- * the ID of the job to check the instance registration for
- * @param instanceListener
- * the listener which shall be notified when a requested instance is available for the job
- * @param numberOfInstances
- * the number of registered instances to wait for
- * @param maxWaitTime
- * the maximum wait time before this method returns
- */
- public static void waitForInstances(JobID jobID, TestInstanceListener instanceListener,
- int numberOfInstances, long maxWaitTime) {
-
- final long startTime = System.currentTimeMillis();
-
- while (instanceListener.getNumberOfAllocatedResourcesForJob(jobID) != numberOfInstances) {
- try {
- Thread.sleep(SLEEP_TIME);
- } catch (InterruptedException e) {
- break;
- }
-
- if ((System.currentTimeMillis() - startTime) >= maxWaitTime) {
- break;
- }
- }
- }
-}
[09/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AbstractInstance.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AbstractInstance.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AbstractInstance.java
deleted file mode 100644
index 56b4eae..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AbstractInstance.java
+++ /dev/null
@@ -1,297 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.List;
-import java.util.Set;
-
-import eu.stratosphere.nephele.deployment.TaskDeploymentDescriptor;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheProfileRequest;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheProfileResponse;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheUpdate;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
-import eu.stratosphere.nephele.taskmanager.TaskKillResult;
-import eu.stratosphere.runtime.io.channels.ChannelID;
-import eu.stratosphere.nephele.ipc.RPC;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.nephele.net.NetUtils;
-import eu.stratosphere.nephele.protocols.TaskOperationProtocol;
-import eu.stratosphere.nephele.taskmanager.TaskCancelResult;
-import eu.stratosphere.nephele.taskmanager.TaskSubmissionResult;
-import eu.stratosphere.nephele.topology.NetworkNode;
-import eu.stratosphere.nephele.topology.NetworkTopology;
-
-/**
- * An abstract instance represents a resource a {@link eu.stratosphere.nephele.taskmanager.TaskManager} runs on.
- *
- */
-public abstract class AbstractInstance extends NetworkNode {
-
- /**
- * The type of the instance.
- */
- private final InstanceType instanceType;
-
- /**
- * The connection info identifying the instance.
- */
- private final InstanceConnectionInfo instanceConnectionInfo;
-
- /**
- * The hardware description as reported by the instance itself.
- */
- private final HardwareDescription hardwareDescription;
-
- /**
- * Stores the RPC stub object for the instance's task manager.
- */
- private TaskOperationProtocol taskManager = null;
-
- /**
- * Constructs an abstract instance object.
- *
- * @param instanceType
- * the type of the instance
- * @param instanceConnectionInfo
- * the connection info identifying the instance
- * @param parentNode
- * the parent node in the network topology
- * @param networkTopology
- * the network topology this node is a part of
- * @param hardwareDescription
- * the hardware description provided by the instance itself
- */
- public AbstractInstance(final InstanceType instanceType, final InstanceConnectionInfo instanceConnectionInfo,
- final NetworkNode parentNode, final NetworkTopology networkTopology,
- final HardwareDescription hardwareDescription) {
- super((instanceConnectionInfo == null) ? null : instanceConnectionInfo.toString(), parentNode, networkTopology);
- this.instanceType = instanceType;
- this.instanceConnectionInfo = instanceConnectionInfo;
- this.hardwareDescription = hardwareDescription;
- }
-
- /**
- * Creates or returns the RPC stub object for the instance's task manager.
- *
- * @return the RPC stub object for the instance's task manager
- * @throws IOException
- * thrown if the RPC stub object for the task manager cannot be created
- */
- private TaskOperationProtocol getTaskManagerProxy() throws IOException {
-
- if (this.taskManager == null) {
-
- this.taskManager = RPC.getProxy(TaskOperationProtocol.class,
- new InetSocketAddress(getInstanceConnectionInfo().address(),
- getInstanceConnectionInfo().ipcPort()), NetUtils.getSocketFactory());
- }
-
- return this.taskManager;
- }
-
- /**
- * Destroys and removes the RPC stub object for this instance's task manager.
- */
- private void destroyTaskManagerProxy() {
-
- if (this.taskManager != null) {
- RPC.stopProxy(this.taskManager);
- this.taskManager = null;
- }
- }
-
- /**
- * Returns the type of the instance.
- *
- * @return the type of the instance
- */
- public final InstanceType getType() {
- return this.instanceType;
- }
-
- /**
- * Returns the instance's connection information object.
- *
- * @return the instance's connection information object
- */
- public final InstanceConnectionInfo getInstanceConnectionInfo() {
- return this.instanceConnectionInfo;
- }
-
- /**
- * Returns the instance's hardware description as reported by the instance itself.
- *
- * @return the instance's hardware description
- */
- public HardwareDescription getHardwareDescription() {
- return this.hardwareDescription;
- }
-
- /**
- * Checks if all the libraries required to run the job with the given
- * job ID are available on this instance. Any libary that is missing
- * is transferred to the instance as a result of this call.
- *
- * @param jobID
- * the ID of the job whose libraries are to be checked for
- * @throws IOException
- * thrown if an error occurs while checking for the libraries
- */
- public synchronized void checkLibraryAvailability(final JobID jobID) throws IOException {
-
- // Now distribute the required libraries for the job
- String[] requiredLibraries = LibraryCacheManager.getRequiredJarFiles(jobID);
-
- if (requiredLibraries == null) {
- throw new IOException("No entry of required libraries for job " + jobID);
- }
-
- LibraryCacheProfileRequest request = new LibraryCacheProfileRequest();
- request.setRequiredLibraries(requiredLibraries);
-
- // Send the request
- LibraryCacheProfileResponse response = null;
- response = getTaskManagerProxy().getLibraryCacheProfile(request);
-
- // Check response and transfer libraries if necessary
- for (int k = 0; k < requiredLibraries.length; k++) {
- if (!response.isCached(k)) {
- LibraryCacheUpdate update = new LibraryCacheUpdate(requiredLibraries[k]);
- getTaskManagerProxy().updateLibraryCache(update);
- }
- }
- }
-
- /**
- * Submits a list of tasks to the instance's {@link eu.stratosphere.nephele.taskmanager.TaskManager}.
- *
- * @param tasks
- * the list of tasks to be submitted
- * @return the result of the submission attempt
- * @throws IOException
- * thrown if an error occurs while transmitting the task
- */
- public synchronized List<TaskSubmissionResult> submitTasks(final List<TaskDeploymentDescriptor> tasks)
- throws IOException {
-
- return getTaskManagerProxy().submitTasks(tasks);
- }
-
- /**
- * Cancels the task identified by the given ID at the instance's
- * {@link eu.stratosphere.nephele.taskmanager.TaskManager}.
- *
- * @param id
- * the ID identifying the task to be canceled
- * @throws IOException
- * thrown if an error occurs while transmitting the request or receiving the response
- * @return the result of the cancel attempt
- */
- public synchronized TaskCancelResult cancelTask(final ExecutionVertexID id) throws IOException {
-
- return getTaskManagerProxy().cancelTask(id);
- }
-
- /**
- * Kills the task identified by the given ID at the instance's
- * {@link eu.stratosphere.nephele.taskmanager.TaskManager}.
- *
- * @param id
- * the ID identifying the task to be killed
- * @throws IOException
- * thrown if an error occurs while transmitting the request or receiving the response
- * @return the result of the kill attempt
- */
- public synchronized TaskKillResult killTask(final ExecutionVertexID id) throws IOException {
-
- return getTaskManagerProxy().killTask(id);
- }
-
- @Override
- public boolean equals(final Object obj) {
-
- // Fall back since dummy instances do not have a instanceConnectionInfo
- if (this.instanceConnectionInfo == null) {
- return super.equals(obj);
- }
-
- if (!(obj instanceof AbstractInstance)) {
- return false;
- }
-
- final AbstractInstance abstractInstance = (AbstractInstance) obj;
-
- return this.instanceConnectionInfo.equals(abstractInstance.getInstanceConnectionInfo());
- }
-
-
- @Override
- public int hashCode() {
-
- // Fall back since dummy instances do not have a instanceConnectionInfo
- if (this.instanceConnectionInfo == null) {
- return super.hashCode();
- }
-
- return this.instanceConnectionInfo.hashCode();
- }
-
- /**
- * Triggers the remote task manager to print out the current utilization of its read and write buffers to its logs.
- *
- * @throws IOException
- * thrown if an error occurs while transmitting the request
- */
- public synchronized void logBufferUtilization() throws IOException {
-
- getTaskManagerProxy().logBufferUtilization();
- }
-
- /**
- * Kills the task manager running on this instance. This method is mainly intended to test and debug Nephele's fault
- * tolerance mechanisms.
- *
- * @throws IOException
- * thrown if an error occurs while transmitting the request
- */
- public synchronized void killTaskManager() throws IOException {
-
- getTaskManagerProxy().killTaskManager();
- }
-
- /**
- * Invalidates the entries identified by the given channel IDs from the remote task manager's receiver lookup cache.
- *
- * @param channelIDs
- * the channel IDs identifying the cache entries to invalidate
- * @throws IOException
- * thrown if an error occurs during this remote procedure call
- */
- public synchronized void invalidateLookupCacheEntries(final Set<ChannelID> channelIDs) throws IOException {
-
- getTaskManagerProxy().invalidateLookupCacheEntries(channelIDs);
- }
-
- /**
- * Destroys all RPC stub objects attached to this instance.
- */
- public synchronized void destroyProxies() {
-
- destroyTaskManagerProxy();
-
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedResource.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedResource.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedResource.java
index eb0a835..7f2ad04 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedResource.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedResource.java
@@ -23,7 +23,7 @@ import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
/**
* An allocated resource object unambiguously defines the
* hardware resources which have been assigned to an {@link eu.stratosphere.nephele.executiongraph.ExecutionVertex} for
- * executing a task. The allocated resource is comprised of an {@link eu.stratosphere.nephele.instance.AbstractInstance}
+ * executing a task. The allocated resource is comprised of an {@link Instance}
* which identifies the node the task is scheduled to run on as well as an
* {@link eu.stratosphere.nephele.instance.AllocationID} which determines the resources the task is scheduled to
* allocate within the node.
@@ -36,12 +36,7 @@ public final class AllocatedResource {
/**
* The instance a task is scheduled to run on.
*/
- private final AbstractInstance instance;
-
- /**
- * The instance type this allocated resource represents.
- */
- private final InstanceType instanceType;
+ private final Instance instance;
/**
* The allocation ID identifying the resources within the instance
@@ -60,24 +55,20 @@ public final class AllocatedResource {
*
* @param instance
* the instance a task is scheduled to run on.
- * @param instanceType
- * the instance type this allocated resource represents
* @param allocationID
* the allocation ID identifying the allocated resources within the instance
*/
- public AllocatedResource(final AbstractInstance instance, final InstanceType instanceType,
- final AllocationID allocationID) {
+ public AllocatedResource(final Instance instance, final AllocationID allocationID) {
this.instance = instance;
- this.instanceType = instanceType;
this.allocationID = allocationID;
}
/**
* Returns the instance a task is scheduled to run on.
- *
+ *
* @return the instance a task is scheduled to run on
*/
- public AbstractInstance getInstance() {
+ public Instance getInstance() {
return this.instance;
}
@@ -90,15 +81,6 @@ public final class AllocatedResource {
return this.allocationID;
}
- /**
- * Returns the instance type this allocated resource represents.
- *
- * @return the instance type this allocated resource represents
- */
- public InstanceType getInstanceType() {
- return this.instanceType;
- }
-
@Override
public boolean equals(final Object obj) {
@@ -120,16 +102,6 @@ public final class AllocatedResource {
}
}
- if (this.instanceType == null) {
- if (allocatedResource.instance != null) {
- return false;
- }
- } else {
- if (!this.instanceType.equals(allocatedResource.getInstanceType())) {
- return false;
- }
- }
-
return true;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedSlot.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedSlot.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedSlot.java
new file mode 100644
index 0000000..0641944
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocatedSlot.java
@@ -0,0 +1,65 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance;
+
+import eu.stratosphere.nephele.jobgraph.JobID;
+
+/**
+ * An allocated slot is a part of an instance which is assigned to a job.
+ * <p>
+ * This class is thread-safe.
+ *
+ */
+public class AllocatedSlot {
+
+ /**
+ * The allocation ID which identifies the resources occupied by this slot.
+ */
+ private final AllocationID allocationID;
+
+ /**
+ * The ID of the job this slice belongs to.
+ */
+ private final JobID jobID;
+
+ /**
+	 * Creates a new allocated slot for the given job.
+	 *
+	 * @param jobID
+	 *        the ID of the job this slot belongs to
+ */
+ public AllocatedSlot(final JobID jobID) {
+
+ this.allocationID = new AllocationID();
+ this.jobID = jobID;
+ }
+
+ /**
+ * Returns the allocation ID of this slice.
+ *
+ * @return the allocation ID of this slice
+ */
+ public AllocationID getAllocationID() {
+ return this.allocationID;
+ }
+
+ /**
+ * Returns the ID of the job this allocated slice belongs to.
+ *
+ * @return the ID of the job this allocated slice belongs to
+ */
+ public JobID getJobID() {
+ return this.jobID;
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocationID.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocationID.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocationID.java
index 3c83b80..3ed5013 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocationID.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/AllocationID.java
@@ -17,8 +17,8 @@ import eu.stratosphere.nephele.AbstractID;
/**
* An allocation ID unambiguously identifies the allocated resources
- * within an {@link AbstractInstance}. The ID is necessary if an {@link InstanceManager} decides to partition
- * {@link AbstractInstance}s
+ * within an {@link Instance}. The ID is necessary if an {@link InstanceManager} decides to partition
+ * {@link Instance}s
* without the knowledge of Nephele's scheduler.
*
*/
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DefaultInstanceManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DefaultInstanceManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DefaultInstanceManager.java
new file mode 100644
index 0000000..7d5f31b
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DefaultInstanceManager.java
@@ -0,0 +1,393 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance;
+
+import eu.stratosphere.configuration.Configuration;
+import eu.stratosphere.configuration.GlobalConfiguration;
+import eu.stratosphere.nephele.jobgraph.JobID;
+import eu.stratosphere.nephele.topology.NetworkNode;
+import eu.stratosphere.nephele.topology.NetworkTopology;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.util.Map;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Collection;
+import java.util.TimerTask;
+import java.util.Timer;
+
+/**
+ * In Nephele an instance manager maintains the set of available compute resources. It is responsible for allocating new
+ * compute resources,
+ * provisioning available compute resources to the JobManager and keeping track of the availability of the utilized
+ * compute resources in order
+ * to report unexpected resource outages.
+ *
+ */
+public class DefaultInstanceManager implements InstanceManager {
+
+ // ------------------------------------------------------------------------
+ // Internal Constants
+ // ------------------------------------------------------------------------
+
+ /**
+ * The log object used to report debugging and error information.
+ */
+ private static final Log LOG = LogFactory.getLog(DefaultInstanceManager.class);
+
+ /**
+ * Default duration after which a host is purged in case it did not send
+ * a heart-beat message.
+ */
+ private static final int DEFAULT_CLEANUP_INTERVAL = 2 * 60; // 2 min.
+
+ /**
+ * The key to retrieve the clean up interval from the configuration.
+ */
+ private static final String CLEANUP_INTERVAL_KEY = "instancemanager.cluster.cleanupinterval";
+
+ // ------------------------------------------------------------------------
+ // Fields
+ // ------------------------------------------------------------------------
+
+ private final Object lock = new Object();
+
+ /**
+ * Duration after which a host is purged in case it did not send a
+ * heart-beat message.
+ */
+ private final long cleanUpInterval;
+
+ /**
+ * Set of hosts known to run a task manager that are thus able to execute
+ * tasks.
+ */
+ private final Map<InstanceConnectionInfo, Instance> registeredHosts;
+
+ /**
+ * The network topology of the cluster.
+ */
+ private final NetworkTopology networkTopology;
+
+ /**
+ * Object that is notified if instances become available or vanish.
+ */
+ private InstanceListener instanceListener;
+
+
+ private boolean shutdown;
+
+ /**
+ * Periodic task that checks whether hosts have not sent their heart-beat
+ * messages and purges the hosts in this case.
+ */
+ private final TimerTask cleanupStaleMachines = new TimerTask() {
+
+ @Override
+ public void run() {
+
+ synchronized (DefaultInstanceManager.this.lock) {
+
+ final List<Map.Entry<InstanceConnectionInfo, Instance>> hostsToRemove =
+ new ArrayList<Map.Entry<InstanceConnectionInfo, Instance>>();
+
+ final Map<JobID, List<AllocatedResource>> staleResources = new HashMap<JobID, List<AllocatedResource>>();
+
+ // check all hosts whether they did not send heart-beat messages.
+ for (Map.Entry<InstanceConnectionInfo, Instance> entry : registeredHosts.entrySet()) {
+
+ final Instance host = entry.getValue();
+ if (!host.isStillAlive(cleanUpInterval)) {
+
+ // this host has not sent the heart-beat messages
+ // -> we terminate all instances running on this host and notify the jobs
+ final Collection<AllocatedSlot> slots = host.removeAllocatedSlots();
+ for (AllocatedSlot slot : slots) {
+
+ final JobID jobID = slot.getJobID();
+
+ List<AllocatedResource> staleResourcesOfJob = staleResources.get(jobID);
+ if (staleResourcesOfJob == null) {
+ staleResourcesOfJob = new ArrayList<AllocatedResource>();
+ staleResources.put(jobID, staleResourcesOfJob);
+ }
+
+ staleResourcesOfJob.add(new AllocatedResource(host, slot.getAllocationID()));
+ }
+
+ hostsToRemove.add(entry);
+ }
+ }
+
+ registeredHosts.entrySet().removeAll(hostsToRemove);
+
+ final Iterator<Map.Entry<JobID, List<AllocatedResource>>> it = staleResources.entrySet().iterator();
+ while (it.hasNext()) {
+ final Map.Entry<JobID, List<AllocatedResource>> entry = it.next();
+ if (instanceListener != null) {
+ instanceListener.allocatedResourcesDied(entry.getKey(), entry.getValue());
+ }
+ }
+ }
+ }
+ };
+
+ // ------------------------------------------------------------------------
+ // Constructor and set-up
+ // ------------------------------------------------------------------------
+
+ /**
+ * Constructor.
+ */
+ public DefaultInstanceManager() {
+
+ this.registeredHosts = new HashMap<InstanceConnectionInfo, Instance>();
+
+ long tmpCleanUpInterval = (long) GlobalConfiguration.getInteger(CLEANUP_INTERVAL_KEY, DEFAULT_CLEANUP_INTERVAL) * 1000;
+
+ if (tmpCleanUpInterval < 10) { // Clean up interval must be at least ten seconds
+ LOG.warn("Invalid clean up interval. Reverting to default cleanup interval of " + DEFAULT_CLEANUP_INTERVAL
+ + " secs.");
+ tmpCleanUpInterval = DEFAULT_CLEANUP_INTERVAL;
+ }
+
+ this.cleanUpInterval = tmpCleanUpInterval;
+
+ this.networkTopology = NetworkTopology.createEmptyTopology();
+
+ // look every BASEINTERVAL milliseconds for crashed hosts
+ final boolean runTimerAsDaemon = true;
+ new Timer(runTimerAsDaemon).schedule(cleanupStaleMachines, 1000, 1000);
+ }
+
+ @Override
+ public void shutdown() {
+ synchronized (this.lock) {
+ if (this.shutdown) {
+ return;
+ }
+
+ this.cleanupStaleMachines.cancel();
+
+ Iterator<Instance> it = this.registeredHosts.values().iterator();
+ while (it.hasNext()) {
+ it.next().destroyProxies();
+ }
+ this.registeredHosts.clear();
+
+ this.shutdown = true;
+ }
+ }
+
+ @Override
+ public void releaseAllocatedResource(AllocatedResource allocatedResource) throws InstanceException
+ {
+ synchronized (this.lock) {
+ // release the instance from the host
+ final Instance clusterInstance = allocatedResource.getInstance();
+ clusterInstance.releaseSlot(allocatedResource.getAllocationID());
+ }
+ }
+
+ /**
+ * Creates a new {@link Instance} object to manage instances that can
+ * be executed on that host.
+ *
+ * @param instanceConnectionInfo
+ * the connection information for the instance
+ * @param hardwareDescription
+ * the hardware description provided by the new instance
+ * @param numberOfSlots
+ * number of slots available on the instance
+ * @return a new {@link Instance} object or <code>null</code> if the cluster instance could not be created
+ */
+ private Instance createNewHost(final InstanceConnectionInfo instanceConnectionInfo,
+ final HardwareDescription hardwareDescription, int numberOfSlots) {
+
+ // Try to match new host with a stub host from the existing topology
+ String instanceName = instanceConnectionInfo.hostname();
+ NetworkNode parentNode = this.networkTopology.getRootNode();
+ NetworkNode currentStubNode = null;
+
+ // Try to match new host using the host name
+ while (true) {
+
+ currentStubNode = this.networkTopology.getNodeByName(instanceName);
+ if (currentStubNode != null) {
+ break;
+ }
+
+ final int pos = instanceName.lastIndexOf('.');
+ if (pos == -1) {
+ break;
+ }
+
+ /*
+				 * If host name is reported as FQDN, iteratively remove parts
+ * of the domain name until a match occurs or no more dots
+ * can be found in the host name.
+ */
+ instanceName = instanceName.substring(0, pos);
+ }
+
+ // Try to match the new host using the IP address
+ if (currentStubNode == null) {
+ instanceName = instanceConnectionInfo.address().toString();
+ instanceName = instanceName.replaceAll("/", ""); // Remove any / characters
+ currentStubNode = this.networkTopology.getNodeByName(instanceName);
+ }
+
+ if (currentStubNode != null) {
+ /*
+ * The instance name will be the same as the one of the stub node. That way
+ * the stub now will be removed from the network topology and replaced be
+ * the new node.
+ */
+ if (currentStubNode.getParentNode() != null) {
+ parentNode = currentStubNode.getParentNode();
+ }
+ // Remove the stub node from the tree
+ currentStubNode.remove();
+ }
+
+ LOG.info("Creating instance for " + instanceConnectionInfo + ", parent is "
+ + parentNode.getName());
+ final Instance host = new Instance(instanceConnectionInfo, parentNode,
+ this.networkTopology, hardwareDescription, numberOfSlots);
+
+ return host;
+ }
+
+ @Override
+ public void reportHeartBeat(InstanceConnectionInfo instanceConnectionInfo) {
+
+ synchronized (this.lock) {
+ Instance host = registeredHosts.get(instanceConnectionInfo);
+
+ if(host == null){
+ LOG.error("Task manager with connection info " + instanceConnectionInfo + " has not been registered.");
+ return;
+ }
+
+ host.reportHeartBeat();
+ }
+ }
+
+ @Override
+ public void registerTaskManager(InstanceConnectionInfo instanceConnectionInfo,
+ HardwareDescription hardwareDescription, int numberOfSlots){
+ synchronized(this.lock){
+ if(registeredHosts.containsKey(instanceConnectionInfo)){
+ LOG.error("Task manager with connection info " + instanceConnectionInfo + " has already been " +
+ "registered.");
+ return;
+ }
+
+ Instance host = createNewHost(instanceConnectionInfo, hardwareDescription, numberOfSlots);
+
+ if(host == null){
+ LOG.error("Could not create a new host object for register task manager for connection info " +
+ instanceConnectionInfo);
+ return;
+ }
+
+ this.registeredHosts.put(instanceConnectionInfo, host);
+ LOG.info("New number of registered hosts is " + this.registeredHosts.size());
+
+ host.reportHeartBeat();
+ }
+ }
+
+ @Override
+ public void requestInstance(JobID jobID, Configuration conf, int requiredSlots)
+ throws InstanceException
+ {
+
+ synchronized(this.lock) {
+ Iterator<Instance> clusterIterator = this.registeredHosts.values().iterator();
+ Instance instance = null;
+ List<AllocatedResource> allocatedResources = new ArrayList<AllocatedResource>();
+ int allocatedSlots = 0;
+
+ while(clusterIterator.hasNext()) {
+ instance = clusterIterator.next();
+ while(instance.getNumberOfAvailableSlots() >0 && allocatedSlots < requiredSlots){
+ AllocatedResource resource = instance.allocateSlot(jobID);
+ allocatedResources.add(resource);
+ allocatedSlots++;
+ }
+ }
+
+ if(allocatedSlots < requiredSlots){
+ throw new InstanceException("Cannot allocate the required number of slots: " + requiredSlots + ".");
+ }
+
+ if (this.instanceListener != null) {
+ final InstanceNotifier instanceNotifier = new InstanceNotifier(
+ this.instanceListener, jobID, allocatedResources);
+ instanceNotifier.start();
+ }
+ }
+ }
+
+ @Override
+ public NetworkTopology getNetworkTopology(JobID jobID) {
+ return this.networkTopology;
+ }
+
+ @Override
+ public void setInstanceListener(InstanceListener instanceListener) {
+ synchronized (this.lock) {
+ this.instanceListener = instanceListener;
+ }
+ }
+
+ @Override
+ public Instance getInstanceByName(String name) {
+ if (name == null) {
+ throw new IllegalArgumentException("Argument name must not be null");
+ }
+
+ synchronized (this.lock) {
+ final Iterator<Instance> it = this.registeredHosts.values().iterator();
+ while (it.hasNext()) {
+ final Instance instance = it.next();
+ if (name.equals(instance.getName())) {
+ return instance;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ @Override
+ public int getNumberOfTaskTrackers() {
+ return this.registeredHosts.size();
+ }
+
+ @Override
+ public int getNumberOfSlots() {
+ int slots = 0;
+
+ for(Instance instance: registeredHosts.values()){
+ slots += instance.getNumberOfSlots();
+ }
+
+ return slots;
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DummyInstance.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DummyInstance.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DummyInstance.java
index 4e0f004..56f44c6 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DummyInstance.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/DummyInstance.java
@@ -14,32 +14,30 @@
package eu.stratosphere.nephele.instance;
/**
- * A DummyInstance is a stub implementation of the {@link AbstractInstance} interface.
+ * A DummyInstance is a stub implementation of the {@link Instance} interface.
* Dummy instances are used to plan a job execution but must be replaced with
* concrete instances before the job execution starts.
*
*/
-public class DummyInstance extends AbstractInstance {
+public class DummyInstance extends Instance {
private static int nextID = 0;
private final String name;
- public static synchronized DummyInstance createDummyInstance(InstanceType type) {
+ public static synchronized DummyInstance createDummyInstance() {
- return new DummyInstance(type, nextID++);
+ return new DummyInstance(nextID++);
}
/**
* Constructs a new dummy instance of the given instance type.
*
- * @param type
- * the type of the new dummy instance
* @param id
* the ID of the dummy instance
*/
- private DummyInstance(InstanceType type, int id) {
- super(type, null, null, null, null);
+ private DummyInstance(int id) {
+ super(null, null, null, null, 0);
this.name = "DummyInstance_" + Integer.toString(id);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Hardware.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Hardware.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Hardware.java
new file mode 100644
index 0000000..398a2a8
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Hardware.java
@@ -0,0 +1,24 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance;
+
+/**
+ * Convenience class to extract hardware specifics of the computer executing this class
+ */
+public class Hardware {
+
+ public static int getNumberCPUCores() {
+ return Runtime.getRuntime().availableProcessors();
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Instance.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Instance.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Instance.java
new file mode 100644
index 0000000..fa17745
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/Instance.java
@@ -0,0 +1,362 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Set;
+import java.util.Collection;
+
+import eu.stratosphere.nephele.deployment.TaskDeploymentDescriptor;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheProfileRequest;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheProfileResponse;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheUpdate;
+import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
+import eu.stratosphere.nephele.ipc.RPC;
+import eu.stratosphere.nephele.jobgraph.JobID;
+import eu.stratosphere.nephele.net.NetUtils;
+import eu.stratosphere.nephele.protocols.TaskOperationProtocol;
+import eu.stratosphere.nephele.taskmanager.TaskCancelResult;
+import eu.stratosphere.nephele.taskmanager.TaskKillResult;
+import eu.stratosphere.nephele.taskmanager.TaskSubmissionResult;
+import eu.stratosphere.nephele.topology.NetworkNode;
+import eu.stratosphere.nephele.topology.NetworkTopology;
+import eu.stratosphere.runtime.io.channels.ChannelID;
+
+/**
+ * An instance represents a resource a {@link eu.stratosphere.nephele.taskmanager.TaskManager} runs on.
+ *
+ */
+public class Instance extends NetworkNode {
+ /**
+ * The connection info identifying the instance.
+ */
+ private final InstanceConnectionInfo instanceConnectionInfo;
+
+ /**
+ * The hardware description as reported by the instance itself.
+ */
+ private final HardwareDescription hardwareDescription;
+
+ /**
+ * Number of slots available on the node
+ */
+ private final int numberOfSlots;
+
+ /**
+ * Allocated slots on this instance
+ */
+ private final Map<AllocationID, AllocatedSlot> allocatedSlots = new HashMap<AllocationID, AllocatedSlot>();
+
+ /**
+ * Stores the RPC stub object for the instance's task manager.
+ */
+ private TaskOperationProtocol taskManager = null;
+
+ /**
+ * Time when the last heart beat was received from the task manager running on this instance.
+ */
+ private long lastReceivedHeartBeat = System.currentTimeMillis();
+
+ /**
+ * Constructs an abstract instance object.
+ *
+ * @param instanceConnectionInfo
+ * the connection info identifying the instance
+ * @param parentNode
+ * the parent node in the network topology
+ * @param networkTopology
+ * the network topology this node is a part of
+ * @param hardwareDescription
+ * the hardware description provided by the instance itself
+ */
+ public Instance(final InstanceConnectionInfo instanceConnectionInfo,
+ final NetworkNode parentNode, final NetworkTopology networkTopology,
+ final HardwareDescription hardwareDescription, int numberOfSlots) {
+ super((instanceConnectionInfo == null) ? null : instanceConnectionInfo.toString(), parentNode, networkTopology);
+ this.instanceConnectionInfo = instanceConnectionInfo;
+ this.hardwareDescription = hardwareDescription;
+ this.numberOfSlots = numberOfSlots;
+ }
+
+ /**
+ * Creates or returns the RPC stub object for the instance's task manager.
+ *
+ * @return the RPC stub object for the instance's task manager
+ * @throws IOException
+ * thrown if the RPC stub object for the task manager cannot be created
+ */
+ private TaskOperationProtocol getTaskManagerProxy() throws IOException {
+
+ if (this.taskManager == null) {
+
+ this.taskManager = RPC.getProxy(TaskOperationProtocol.class,
+ new InetSocketAddress(getInstanceConnectionInfo().address(),
+ getInstanceConnectionInfo().ipcPort()), NetUtils.getSocketFactory());
+ }
+
+ return this.taskManager;
+ }
+
+ /**
+ * Destroys and removes the RPC stub object for this instance's task manager.
+ */
+ private void destroyTaskManagerProxy() {
+
+ if (this.taskManager != null) {
+ RPC.stopProxy(this.taskManager);
+ this.taskManager = null;
+ }
+ }
+
+ /**
+ * Returns the instance's connection information object.
+ *
+ * @return the instance's connection information object
+ */
+ public final InstanceConnectionInfo getInstanceConnectionInfo() {
+ return this.instanceConnectionInfo;
+ }
+
+ /**
+ * Returns the instance's hardware description as reported by the instance itself.
+ *
+ * @return the instance's hardware description
+ */
+ public HardwareDescription getHardwareDescription() {
+ return this.hardwareDescription;
+ }
+
+ /**
+ * Checks if all the libraries required to run the job with the given
+ * job ID are available on this instance. Any library that is missing
+ * is transferred to the instance as a result of this call.
+ *
+ * @param jobID
+ * the ID of the job whose libraries are to be checked for
+ * @throws IOException
+ * thrown if an error occurs while checking for the libraries
+ */
+ public synchronized void checkLibraryAvailability(final JobID jobID) throws IOException {
+
+ // Now distribute the required libraries for the job
+ String[] requiredLibraries = LibraryCacheManager.getRequiredJarFiles(jobID);
+
+ if (requiredLibraries == null) {
+ throw new IOException("No entry of required libraries for job " + jobID);
+ }
+
+ LibraryCacheProfileRequest request = new LibraryCacheProfileRequest();
+ request.setRequiredLibraries(requiredLibraries);
+
+ // Send the request
+ LibraryCacheProfileResponse response = null;
+ response = getTaskManagerProxy().getLibraryCacheProfile(request);
+
+ // Check response and transfer libraries if necessary
+ for (int k = 0; k < requiredLibraries.length; k++) {
+ if (!response.isCached(k)) {
+ LibraryCacheUpdate update = new LibraryCacheUpdate(requiredLibraries[k]);
+ getTaskManagerProxy().updateLibraryCache(update);
+ }
+ }
+ }
+
+ /**
+ * Submits a list of tasks to the instance's {@link eu.stratosphere.nephele.taskmanager.TaskManager}.
+ *
+ * @param tasks
+ * the list of tasks to be submitted
+ * @return the result of the submission attempt
+ * @throws IOException
+ * thrown if an error occurs while transmitting the task
+ */
+ public synchronized List<TaskSubmissionResult> submitTasks(final List<TaskDeploymentDescriptor> tasks)
+ throws IOException {
+
+ return getTaskManagerProxy().submitTasks(tasks);
+ }
+
+ /**
+ * Cancels the task identified by the given ID at the instance's
+ * {@link eu.stratosphere.nephele.taskmanager.TaskManager}.
+ *
+ * @param id
+ * the ID identifying the task to be canceled
+ * @throws IOException
+ * thrown if an error occurs while transmitting the request or receiving the response
+ * @return the result of the cancel attempt
+ */
+ public synchronized TaskCancelResult cancelTask(final ExecutionVertexID id) throws IOException {
+
+ return getTaskManagerProxy().cancelTask(id);
+ }
+
+ /**
+ * Kills the task identified by the given ID at the instance's
+ * {@link eu.stratosphere.nephele.taskmanager.TaskManager}.
+ *
+ * @param id
+ * the ID identifying the task to be killed
+ * @throws IOException
+ * thrown if an error occurs while transmitting the request or receiving the response
+ * @return the result of the kill attempt
+ */
+ public synchronized TaskKillResult killTask(final ExecutionVertexID id) throws IOException {
+
+ return getTaskManagerProxy().killTask(id);
+ }
+
+ /**
+ * Updates the time of last received heart beat to the current system time.
+ */
+ public synchronized void reportHeartBeat() {
+ this.lastReceivedHeartBeat = System.currentTimeMillis();
+ }
+
+ /**
+ * Returns whether the host is still alive.
+ *
+ * @param cleanUpInterval
+ * duration (in milliseconds) after which a host is
+ * considered dead if it has not received heart-beats.
+ * @return <code>true</code> if the host has received a heart-beat before the <code>cleanUpInterval</code> duration
+ * has expired, <code>false</code> otherwise
+ */
+ public synchronized boolean isStillAlive(final long cleanUpInterval) {
+
+ if (this.lastReceivedHeartBeat + cleanUpInterval < System.currentTimeMillis()) {
+ return false;
+ }
+ return true;
+ }
+
+
+ @Override
+ public boolean equals(final Object obj) {
+
+ // Fall back since dummy instances do not have an instanceConnectionInfo
+ if (this.instanceConnectionInfo == null) {
+ return super.equals(obj);
+ }
+
+ if (!(obj instanceof Instance)) {
+ return false;
+ }
+
+ final Instance abstractInstance = (Instance) obj;
+
+ return this.instanceConnectionInfo.equals(abstractInstance.getInstanceConnectionInfo());
+ }
+
+
+ @Override
+ public int hashCode() {
+
+ // Fall back since dummy instances do not have an instanceConnectionInfo
+ if (this.instanceConnectionInfo == null) {
+ return super.hashCode();
+ }
+
+ return this.instanceConnectionInfo.hashCode();
+ }
+
+ /**
+ * Triggers the remote task manager to print out the current utilization of its read and write buffers to its logs.
+ *
+ * @throws IOException
+ * thrown if an error occurs while transmitting the request
+ */
+ public synchronized void logBufferUtilization() throws IOException {
+
+ getTaskManagerProxy().logBufferUtilization();
+ }
+
+ /**
+ * Kills the task manager running on this instance. This method is mainly intended to test and debug Nephele's fault
+ * tolerance mechanisms.
+ *
+ * @throws IOException
+ * thrown if an error occurs while transmitting the request
+ */
+ public synchronized void killTaskManager() throws IOException {
+
+ getTaskManagerProxy().killTaskManager();
+ }
+
+ /**
+ * Invalidates the entries identified by the given channel IDs from the remote task manager's receiver lookup cache.
+ *
+ * @param channelIDs
+ * the channel IDs identifying the cache entries to invalidate
+ * @throws IOException
+ * thrown if an error occurs during this remote procedure call
+ */
+ public synchronized void invalidateLookupCacheEntries(final Set<ChannelID> channelIDs) throws IOException {
+ getTaskManagerProxy().invalidateLookupCacheEntries(channelIDs);
+ }
+
+ /**
+ * Destroys all RPC stub objects attached to this instance.
+ */
+ public synchronized void destroyProxies() {
+
+ destroyTaskManagerProxy();
+
+ }
+
+ public int getNumberOfSlots() {
+ return numberOfSlots;
+ }
+
+ public int getNumberOfAvailableSlots() { return numberOfSlots - allocatedSlots.size(); }
+
+ public synchronized AllocatedResource allocateSlot(JobID jobID) throws InstanceException{
+ if(allocatedSlots.size() < numberOfSlots){
+ AllocatedSlot slot = new AllocatedSlot(jobID);
+
+ allocatedSlots.put(slot.getAllocationID(), slot);
+ return new AllocatedResource(this,slot.getAllocationID());
+ }else{
+ throw new InstanceException("Overbooking instance " + instanceConnectionInfo + ".");
+ }
+ }
+
+ public synchronized void releaseSlot(AllocationID allocationID) {
+ if(allocatedSlots.containsKey(allocationID)){
+ allocatedSlots.remove(allocationID);
+ }else{
+ throw new RuntimeException("There is no slot registered with allocation ID " + allocationID + ".");
+ }
+ }
+
+ public Collection<AllocatedSlot> getAllocatedSlots() {
+ return allocatedSlots.values();
+ }
+
+ public Collection<AllocatedSlot> removeAllocatedSlots() {
+ Collection<AllocatedSlot> slots = new ArrayList<AllocatedSlot>(this.allocatedSlots.values());
+
+ for(AllocatedSlot slot : slots){
+ releaseSlot(slot.getAllocationID());
+ }
+
+ return slots;
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceManager.java
index a1015b5..00795f4 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceManager.java
@@ -13,157 +13,32 @@
package eu.stratosphere.nephele.instance;
-import java.util.List;
-import java.util.Map;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.topology.NetworkTopology;
-/**
- * In Nephele an instance manager maintains the set of available compute resources. It is responsible for allocating new
- * compute resources,
- * provisioning available compute resources to the JobManager and keeping track of the availability of the utilized
- * compute resources in order
- * to report unexpected resource outages.
- *
- */
public interface InstanceManager {
- /**
- * Requests an instance of the provided instance type from the instance manager.
- *
- * @param jobID
- * the ID of the job this instance is requested for
- * @param conf
- * a configuration object including additional request information (e.g. credentials)
- * @param instanceRequestMap
- * a map specifying the instances requested by this call
- * @param count
- * the number of instances
- * @throws InstanceException
- * thrown if an error occurs during the instance request
- */
- void requestInstance(JobID jobID, Configuration conf, InstanceRequestMap instanceRequestMap,
- List<String> splitAffinityList) throws InstanceException;
- /**
- * Releases an allocated resource from a job.
- *
- * @param jobID
- * the ID of the job the instance has been used for
- * @param conf
- * a configuration object including additional release information (e.g. credentials)
- * @param allocatedResource
- * the allocated resource to be released
- * @throws InstanceException
- * thrown if an error occurs during the release process
- */
- void releaseAllocatedResource(JobID jobID, Configuration conf, AllocatedResource allocatedResource)
- throws InstanceException;
-
- /**
- * Suggests a suitable instance type according to the provided hardware characteristics.
- *
- * @param minNumComputeUnits
- * the minimum number of compute units
- * @param minNumCPUCores
- * the minimum number of CPU cores
- * @param minMemorySize
- * the minimum number of main memory (in MB)
- * @param minDiskCapacity
- * the minimum hard disk capacity (in GB)
- * @param maxPricePerHour
- * the maximum price per hour for the instance
- * @return the instance type matching the requested hardware profile best or <code>null</code> if no such instance
- * type is available
- */
- InstanceType getSuitableInstanceType(int minNumComputeUnits, int minNumCPUCores, int minMemorySize,
- int minDiskCapacity, int maxPricePerHour);
+ void shutdown();
- /**
- * Reports a heart beat message of an instance.
- *
- * @param instanceConnectionInfo
- * the {@link InstanceConnectionInfo} object attached to the heart beat message
- * @param hardwareDescription
- * a hardware description with details on the instance's compute resources.
- */
- void reportHeartBeat(InstanceConnectionInfo instanceConnectionInfo, HardwareDescription hardwareDescription);
+ void releaseAllocatedResource(AllocatedResource allocatedResource) throws InstanceException;
- /**
- * Translates the name of an instance type to the corresponding instance type object.
- *
- * @param instanceTypeName
- * the name of the instance type
- * @return the instance type object matching the name or <code>null</code> if no such instance type exists
- */
- InstanceType getInstanceTypeByName(String instanceTypeName);
+ void reportHeartBeat(InstanceConnectionInfo instanceConnectionInfo);
- /**
- * Returns the default instance type used by the instance manager.
- *
- * @return the default instance type
- */
- InstanceType getDefaultInstanceType();
+ void registerTaskManager(InstanceConnectionInfo instanceConnectionInfo,
+ HardwareDescription hardwareDescription, int numberOfSlots);
+ void requestInstance(JobID jobID, Configuration conf, int requiredSlots)
+ throws InstanceException;
- /**
- * Returns the network topology for the job with the given ID. The network topology
- * for the job might only be an excerpt of the overall network topology. It only
- * includes those instances as leaf nodes which are really allocated for the
- * execution of the job.
- *
- * @param jobID
- * the ID of the job to get the topology for
- * @return the network topology for the job
- */
NetworkTopology getNetworkTopology(JobID jobID);
- /**
- * Sets the {@link InstanceListener} object which is supposed to be
- * notified about instance availability and deaths.
- *
- * @param instanceListener
- * the instance listener to set for this instance manager
- */
void setInstanceListener(InstanceListener instanceListener);
- /**
- * Returns a map of all instance types which are currently available to Nephele. The map contains a description of
- * the hardware characteristics for each instance type as provided in the configuration file. Moreover, it contains
- * the actual hardware description as reported by task managers running on the individual instances. If available,
- * the map also contains the maximum number instances Nephele can allocate of each instance type (i.e. if no other
- * job occupies instances).
- *
- * @return a list of all instance types available to Nephele
- */
- Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes();
-
- /**
- * Returns the {@link AbstractInstance} with the given name.
- *
- * @param name
- * the name of the instance
- * @return the instance with the given name or <code>null</code> if no such instance could be found
- */
- AbstractInstance getInstanceByName(String name);
+ Instance getInstanceByName(String name);
- /**
- * Cancels all pending instance requests that might still exist for the job with the given ID.
- *
- * @param jobID
- * the ID of the job to cancel the pending instance requests for
- */
- void cancelPendingRequests(JobID jobID);
-
- /**
- * Shuts the instance manager down and stops all its internal processes.
- */
- void shutdown();
-
- /**
- *
- * @return the number of available (registered) TaskTrackers
- */
int getNumberOfTaskTrackers();
+
+ int getNumberOfSlots();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceNotifier.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceNotifier.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceNotifier.java
new file mode 100644
index 0000000..2df3d3d
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceNotifier.java
@@ -0,0 +1,71 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance;
+
+import java.util.List;
+
+import eu.stratosphere.nephele.instance.AllocatedResource;
+import eu.stratosphere.nephele.instance.InstanceListener;
+import eu.stratosphere.nephele.jobgraph.JobID;
+
+/**
+ * This class is an auxiliary class to send the notification
+ * about the availability of an {@link eu.stratosphere.nephele.instance.Instance} to the given {@link
+ * InstanceListener} object. The notification must be sent from
+ * a separate thread, otherwise the atomic operation of requesting an instance
+ * for a vertex and switching to the state ASSIGNING could not be guaranteed.
+ * This class is thread-safe.
+ *
+ */
+public class InstanceNotifier extends Thread {
+
+ /**
+ * The {@link InstanceListener} object to send the notification to.
+ */
+ private final InstanceListener instanceListener;
+
+ /**
+ * The ID of the job the notification refers to.
+ */
+ private final JobID jobID;
+
+ /**
+ * The allocated resources the notification refers to.
+ */
+ private final List<AllocatedResource> allocatedResources;
+
+ /**
+ * Constructs a new instance notifier object.
+ *
+ * @param instanceListener
+ * the listener to send the notification to
+ * @param jobID
+ * the ID of the job the notification refers to
+ * @param allocatedResources
+ * the resources which have been allocated for the job
+ */
+ public InstanceNotifier(final InstanceListener instanceListener, final JobID jobID,
+ final List<AllocatedResource> allocatedResources) {
+ this.instanceListener = instanceListener;
+ this.jobID = jobID;
+ this.allocatedResources = allocatedResources;
+ }
+
+
+ @Override
+ public void run() {
+
+ this.instanceListener.resourcesAllocated(this.jobID, this.allocatedResources);
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceRequestMap.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceRequestMap.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceRequestMap.java
deleted file mode 100644
index 4167f67..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceRequestMap.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * An instance request map specifies the required types of instances to run a specific job and the respective number
- * thereof. For each instance type it is possible to specify the minimum number of instances required to run the job. If
- * the {@link InstanceManager} cannot manage to provide at least this minimum numbers of instances for the given type,
- * the job will be rejected.
- * <p>
- * In addition, is it also possible to specify the optimal number of instances for a particular instance type. The
- * {@link InstanceManager} will try to provide this optimal number of instances, but will also start the job with less
- * instances.
- * <p>
- * This class is not thread-safe.
- *
- */
-public final class InstanceRequestMap {
-
- /**
- * The map holding the minimum number of instances to be requested for each instance type.
- */
- private final Map<InstanceType, Integer> minimumMap = new HashMap<InstanceType, Integer>();
-
- /**
- * The map holding the maximum number of instances to be requested for each instance type.
- */
- private final Map<InstanceType, Integer> maximumMap = new HashMap<InstanceType, Integer>();
-
- /**
- * Sets the minimum number of instances to be requested from the given instance type.
- *
- * @param instanceType
- * the type of instance to request
- * @param number
- * the minimum number of instances to request
- */
- public void setMinimumNumberOfInstances(final InstanceType instanceType, final int number) {
-
- this.minimumMap.put(instanceType, Integer.valueOf(number));
- }
-
- /**
- * Sets the maximum number of instances to be requested from the given instance type.
- *
- * @param instanceType
- * the type of instance to request
- * @param number
- * the maximum number of instances to request
- */
- public void setMaximumNumberOfInstances(final InstanceType instanceType, final int number) {
-
- this.maximumMap.put(instanceType, Integer.valueOf(number));
- }
-
- /**
- * Sets both the minimum and the maximum number of instances to be requested from the given instance type.
- *
- * @param instanceType
- * the type of instance to request
- * @param number
- * the minimum and the maximum number of instances to request
- */
- public void setNumberOfInstances(final InstanceType instanceType, final int number) {
-
- setMinimumNumberOfInstances(instanceType, number);
- setMaximumNumberOfInstances(instanceType, number);
- }
-
- /**
- * Returns the minimum number of instances to be requested from the given instance type.
- *
- * @param instanceType
- * the type of instance to request
- * @return the minimum number of instances to be requested from the given instance type
- */
- public int getMinimumNumberOfInstances(final InstanceType instanceType) {
-
- final Integer val = this.minimumMap.get(instanceType);
- if (val != null) {
- return val.intValue();
- }
-
- return 0;
- }
-
- /**
- * Returns the maximum number of instances to be requested from the given instance type.
- *
- * @param instanceType
- * the type of instance to request
- * @return the maximum number of instances to be requested from the given instance type
- */
- public int getMaximumNumberOfInstances(final InstanceType instanceType) {
-
- final Integer val = this.maximumMap.get(instanceType);
- if (val != null) {
- return val.intValue();
- }
-
- return 0;
- }
-
- /**
- * Checks if this instance request map is empty, i.e. neither contains an entry for the minimum or maximum number of
- * instances to be requested for any instance type.
- *
- * @return <code>true</code> if the map is empty, <code>false</code> otherwise
- */
- public boolean isEmpty() {
-
- if (!this.maximumMap.isEmpty()) {
- return false;
- }
-
- if (!this.minimumMap.isEmpty()) {
- return false;
- }
-
- return true;
- }
-
- /**
- * Returns an {@link Iterator} object which allows to traverse the minimum number of instances to be requested for
- * each instance type.
- *
- * @return an iterator to traverse the minimum number of instances to be requested for each instance type
- */
- public Iterator<Map.Entry<InstanceType, Integer>> getMaximumIterator() {
-
- return this.maximumMap.entrySet().iterator();
- }
-
- /**
- * Returns an {@link Iterator} object which allows to traverse the maximum number of instances to be requested for
- * each instance type.
- *
- * @return an iterator to traverse the maximum number of instances to be requested for each instance type
- */
- public Iterator<Map.Entry<InstanceType, Integer>> getMinimumIterator() {
-
- return this.minimumMap.entrySet().iterator();
- }
-
- /**
- * Returns the number of different instance types stored in this request map.
- *
- * @return the number of different instance types stored in this request map
- */
- public int size() {
-
- final int s = this.maximumMap.size();
-
- if (s != this.minimumMap.size()) {
- throw new IllegalStateException("InstanceRequestMap is in an inconsistent state");
- }
-
- return s;
- }
-
- /**
- * Clears the instance request map.
- */
- public void clear() {
-
- this.maximumMap.clear();
- this.minimumMap.clear();
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceType.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceType.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceType.java
deleted file mode 100644
index f2bb4e5..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceType.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import eu.stratosphere.core.io.IOReadableWritable;
-import eu.stratosphere.core.io.StringRecord;
-
-/**
- * An instance type describes the hardware resources a task manager runs on. According
- * to its type an instance has a specific number of CPU cores, computation units, a certain
- * amount of main memory and disk space. In addition, it has a specific price per hour.
- *
- */
-public final class InstanceType implements IOReadableWritable {
-
- /**
- * The identifier for this instance type.
- */
- private String identifier;
-
- /**
- * The number of computational units of this instance type.
- * A computational unit is a virtual compute capacity. A host with a
- * single-core 2 GHz CPU may possess 20 compute units (1*20), while a
- * dual-core 2.5 GHz CPU may possess 50 compute units (2*25). The
- * specified number of compute units expresses the fraction of the
- * CPU capacity promised to a user.
- */
- private int numberOfComputeUnits = 0;
-
- /**
- * The number of CPU cores of this instance type.
- */
- private int numberOfCores = 0;
-
- /**
- * The amount of main memory of this instance type (in MB).
- */
- private int memorySize = 0;
-
- /**
- * The disk capacity of this instance type (in GB).
- */
- private int diskCapacity = 0;
-
- /**
- * The price per hour that is charged for running instances of this type.
- */
- private int pricePerHour = 0;
-
- /**
- * Public constructor required for the serialization process.
- */
- public InstanceType() {
- }
-
- /**
- * Creates a new instance type.
- *
- * @param identifier
- * identifier for this instance type
- * @param numberOfComputeUnits
- * number of computational units of this instance type
- * @param numberOfCores
- * number of CPU cores of this instance type
- * @param memorySize
- * amount of main memory of this instance type (in MB)
- * @param diskCapacity
- * disk capacity of this instance type (in GB)
- * @param pricePerHour
- * price per hour that is charged for running instances of this type
- */
- InstanceType(final String identifier, final int numberOfComputeUnits, final int numberOfCores,
- final int memorySize,
- final int diskCapacity, final int pricePerHour) {
-
- this.identifier = identifier;
- this.numberOfComputeUnits = numberOfComputeUnits;
- this.numberOfCores = numberOfCores;
- this.memorySize = memorySize;
- this.diskCapacity = diskCapacity;
- this.pricePerHour = pricePerHour;
- }
-
- /**
- * Returns the instance type's number of computational units.
- *
- * @return the instance type's number of computational units
- */
- public int getNumberOfComputeUnits() {
- return this.numberOfComputeUnits;
- }
-
- /**
- * Returns the instance type's number of CPU cores.
- *
- * @return the instance type's number of CPU cores
- */
- public int getNumberOfCores() {
- return this.numberOfCores;
- }
-
- /**
- * Returns the instance type's amount of main memory.
- *
- * @return the instance type's amount of main memory
- */
- public int getMemorySize() {
- return this.memorySize;
- }
-
- /**
- * Returns the instance type's disk capacity.
- *
- * @return the instance type's disk capacity
- */
- public int getDiskCapacity() {
- return this.diskCapacity;
- }
-
- /**
- * Returns the instance type's price per hour.
- *
- * @return the instance type's price per hour
- */
- public int getPricePerHour() {
- return this.pricePerHour;
- }
-
- /**
- * Returns the instance type's identifier.
- *
- * @return the instance type's identifier
- */
- public String getIdentifier() {
- return this.identifier;
- }
-
-
- @Override
- public String toString() {
-
- final StringBuilder bld = new StringBuilder(32);
- bld.append(this.identifier);
- bld.append(' ');
- bld.append('(');
- bld.append(this.numberOfComputeUnits);
- bld.append(',');
- bld.append(this.numberOfCores);
- bld.append(',');
- bld.append(this.memorySize);
- bld.append(',');
- bld.append(this.diskCapacity);
- bld.append(',');
- bld.append(this.pricePerHour);
- bld.append(')');
-
- return bld.toString();
- }
-
-
- @Override
- public void write(final DataOutput out) throws IOException {
-
- StringRecord.writeString(out, this.identifier);
- out.writeInt(this.numberOfComputeUnits);
- out.writeInt(this.numberOfCores);
- out.writeInt(this.memorySize);
- out.writeInt(this.diskCapacity);
- out.writeInt(this.pricePerHour);
- }
-
-
- @Override
- public void read(final DataInput in) throws IOException {
-
- this.identifier = StringRecord.readString(in);
- this.numberOfComputeUnits = in.readInt();
- this.numberOfCores = in.readInt();
- this.memorySize = in.readInt();
- this.diskCapacity = in.readInt();
- this.pricePerHour = in.readInt();
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescription.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescription.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescription.java
deleted file mode 100644
index ce0a694..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescription.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import eu.stratosphere.core.io.IOReadableWritable;
-
-/**
- * An instance type description provides details of an instance type. It can comprise both the hardware description from
- * the instance type description (as provided by the operator/administrator of the instance) as well as the actual
- * hardware description which has been determined on the compute instance itself.
- *
- */
-public final class InstanceTypeDescription implements IOReadableWritable {
-
- /**
- * The instance type.
- */
- private InstanceType instanceType = null;
-
- /**
- * The hardware description as created by the {@link InstanceManager}.
- */
- private HardwareDescription hardwareDescription = null;
-
- /**
- * The maximum number of available instances of this type.
- */
- private int maximumNumberOfAvailableInstances = 0;
-
- /**
- * Public default constructor required for serialization process.
- */
- public InstanceTypeDescription() {
- }
-
- /**
- * Constructs a new instance type description.
- *
- * @param instanceType
- * the instance type
- * @param hardwareDescription
- * the hardware description as created by the {@link InstanceManager}
- * @param maximumNumberOfAvailableInstances
- * the maximum number of available instances of this type
- */
- InstanceTypeDescription(final InstanceType instanceType, final HardwareDescription hardwareDescription,
- final int maximumNumberOfAvailableInstances) {
-
- this.instanceType = instanceType;
- this.hardwareDescription = hardwareDescription;
- this.maximumNumberOfAvailableInstances = maximumNumberOfAvailableInstances;
- }
-
-
- @Override
- public void write(final DataOutput out) throws IOException {
-
- if (this.instanceType == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- this.instanceType.write(out);
- }
-
- if (this.hardwareDescription == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- this.hardwareDescription.write(out);
- }
-
- out.writeInt(this.maximumNumberOfAvailableInstances);
- }
-
-
- @Override
- public void read(final DataInput in) throws IOException {
-
- if (in.readBoolean()) {
- this.instanceType = new InstanceType();
- this.instanceType.read(in);
- } else {
- this.instanceType = null;
- }
-
- if (in.readBoolean()) {
- this.hardwareDescription = new HardwareDescription();
- this.hardwareDescription.read(in);
- }
-
- this.maximumNumberOfAvailableInstances = in.readInt();
- }
-
- /**
- * Returns the hardware description as created by the {@link InstanceManager}.
- *
- * @return the instance's hardware description or <code>null</code> if no description is available
- */
- public HardwareDescription getHardwareDescription() {
- return this.hardwareDescription;
- }
-
- /**
- * Returns the instance type as determined by the {@link InstanceManager}.
- *
- * @return the instance type
- */
- public InstanceType getInstanceType() {
- return this.instanceType;
- }
-
- /**
- * Returns the maximum number of instances the {@link InstanceManager} can at most allocate of this instance type
- * (i.e. when no other jobs are occupying any resources).
- *
- * @return the maximum number of instances of this type or <code>-1</code> if the number is unknown to the
- * {@link InstanceManager}
- */
- public int getMaximumNumberOfAvailableInstances() {
- return this.maximumNumberOfAvailableInstances;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescriptionFactory.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescriptionFactory.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescriptionFactory.java
deleted file mode 100644
index 2b3e7db..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeDescriptionFactory.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance;
-
-/**
- * This factory produces {@link InstanceTypeDescription} objects.
- * <p>
- * This class is thread-safe.
- *
- */
-public class InstanceTypeDescriptionFactory {
-
- /**
- * Private constructor, so class cannot be instantiated.
- */
- private InstanceTypeDescriptionFactory() {
- }
-
- /**
- * Constructs a new {@link InstanceTypeDescription} object.
- *
- * @param instanceType
- * the instance type
- * @param hardwareDescription
- * the hardware description as created by the {@link InstanceManager}
- * @param numberOfAvailableInstances
- * the number of available instances of this type
- * @return the instance type description
- */
- public static InstanceTypeDescription construct(InstanceType instanceType, HardwareDescription hardwareDescription,
- int numberOfAvailableInstances) {
-
- return new InstanceTypeDescription(instanceType, hardwareDescription, numberOfAvailableInstances);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeFactory.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeFactory.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeFactory.java
deleted file mode 100644
index ff501c4..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/InstanceTypeFactory.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance;
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * This factory constructs {@link InstanceType} objects.
- *
- */
-public class InstanceTypeFactory {
-
- /**
- * The logger used to report errors.
- */
- private static final Log LOG = LogFactory.getLog(InstanceTypeFactory.class);
-
- /**
- * The pattern used to parse the hardware descriptions of instance types.
- */
- private static Pattern INSTANCE_TYPE_PATTERN = Pattern.compile("^([^,]+),(\\d+),(\\d+),(\\d+),(\\d+),(\\d+)$");
-
- /**
- * Private constructor, so class cannot be instantiated.
- */
- private InstanceTypeFactory() {
- }
-
- /**
- * Constructs an {@link InstanceType} object by parsing a hardware description string.
- *
- * @param description
- * the hardware description reflected by this instance type
- * @return an instance type reflecting the given hardware description or <code>null</code> if the description cannot
- * be parsed
- */
- public static InstanceType constructFromDescription(String description) {
-
- final Matcher m = INSTANCE_TYPE_PATTERN.matcher(description);
- if (!m.matches()) {
- LOG.error("Cannot extract instance type from string " + description);
- return null;
- }
-
- final String identifier = m.group(1);
- final int numComputeUnits = Integer.parseInt(m.group(2));
- final int numCores = Integer.parseInt(m.group(3));
- final int memorySize = Integer.parseInt(m.group(4));
- final int diskCapacity = Integer.parseInt(m.group(5));
- final int pricePerHour = Integer.parseInt(m.group(6));
-
- return new InstanceType(identifier, numComputeUnits, numCores, memorySize, diskCapacity, pricePerHour);
- }
-
- /**
- * Constructs an {@link InstanceType} from the given parameters.
- *
- * @param identifier
- * identifier for this instance type
- * @param numberOfComputeUnits
- * number of computational units of this instance type
- * @param numberOfCores
- * number of CPU cores of this instance type
- * @param memorySize
- * amount of main memory of this instance type (in MB)
- * @param diskCapacity
- * disk capacity of this instance type (in GB)
- * @param pricePerHour
- * price per hour that is charged for running instances of this type
- */
- public static InstanceType construct(String identifier, int numberOfComputeUnits, int numberOfCores,
- int memorySize, int diskCapacity, int pricePerHour) {
-
- return new InstanceType(identifier, numberOfComputeUnits, numberOfCores, memorySize, diskCapacity, pricePerHour);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/LocalInstanceManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/LocalInstanceManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/LocalInstanceManager.java
new file mode 100644
index 0000000..1576649
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/instance/LocalInstanceManager.java
@@ -0,0 +1,60 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance;
+
+
+import eu.stratosphere.configuration.ConfigConstants;
+import eu.stratosphere.configuration.Configuration;
+import eu.stratosphere.configuration.GlobalConfiguration;
+import eu.stratosphere.nephele.ExecutionMode;
+import eu.stratosphere.nephele.taskmanager.TaskManager;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class LocalInstanceManager extends DefaultInstanceManager {
+
+ private List<TaskManager> taskManagers = new ArrayList<TaskManager>();
+
+ public LocalInstanceManager() throws Exception{
+ int numTaskManager = GlobalConfiguration.getInteger(ConfigConstants
+ .LOCAL_INSTANCE_MANAGER_NUMBER_TASK_MANAGER, 1);
+
+ ExecutionMode execMode = numTaskManager == 1 ? ExecutionMode.LOCAL : ExecutionMode.CLUSTER;
+
+ for (int i=0; i < numTaskManager; i++){
+ Configuration tm = new Configuration();
+ int ipcPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_IPC_PORT_KEY,
+ ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT);
+ int dataPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY,
+ ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT);
+
+ tm.setInteger(ConfigConstants.TASK_MANAGER_IPC_PORT_KEY, ipcPort + i);
+ tm.setInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, dataPort + i);
+
+ GlobalConfiguration.includeConfiguration(tm);
+
+ taskManagers.add(new TaskManager(execMode));
+ }
+ }
+
+ @Override
+ public void shutdown(){
+ for(TaskManager taskManager: taskManagers){
+ taskManager.shutdown();
+ }
+
+ super.shutdown();
+ }
+}
[19/22] Merge fix to omit input/output registering on JobManager
Rework Invokable Task Hierarchy
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/DoubleSourceTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/DoubleSourceTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/DoubleSourceTask.java
new file mode 100644
index 0000000..aa46af8
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/DoubleSourceTask.java
@@ -0,0 +1,134 @@
+/***********************************************************************************************************************
+ *
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ *
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.util.tasks;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import eu.stratosphere.core.fs.FSDataInputStream;
+import eu.stratosphere.core.fs.FileInputSplit;
+import eu.stratosphere.core.fs.FileSystem;
+import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
+import eu.stratosphere.nephele.template.InputSplitProvider;
+import eu.stratosphere.runtime.io.api.RecordWriter;
+import eu.stratosphere.runtime.fs.LineReader;
+
+public class DoubleSourceTask extends AbstractInvokable {
+
+ private RecordWriter<StringRecord> output1 = null;
+
+ private RecordWriter<StringRecord> output2 = null;
+
+ @Override
+ public void invoke() throws Exception {
+ this.output1.initializeSerializers();
+ this.output2.initializeSerializers();
+
+ final Iterator<FileInputSplit> splitIterator = getInputSplits();
+
+ while (splitIterator.hasNext()) {
+
+ final FileInputSplit split = splitIterator.next();
+
+ final long start = split.getStart();
+ final long length = split.getLength();
+
+ final FileSystem fs = FileSystem.get(split.getPath().toUri());
+
+ final FSDataInputStream fdis = fs.open(split.getPath());
+
+ final LineReader lineReader = new LineReader(fdis, start, length, (1024 * 1024));
+
+ byte[] line = lineReader.readLine();
+
+ while (line != null) {
+
+ // Create a string object from the data read
+ StringRecord str = new StringRecord();
+ str.set(line);
+
+ // Send out string
+ output1.emit(str);
+ output2.emit(str);
+
+ line = lineReader.readLine();
+ }
+
+ // Close the stream;
+ lineReader.close();
+ }
+
+ this.output1.flush();
+ this.output2.flush();
+ }
+
+ @Override
+ public void registerInputOutput() {
+ this.output1 = new RecordWriter<StringRecord>(this);
+ this.output2 = new RecordWriter<StringRecord>(this);
+ }
+
+ private Iterator<FileInputSplit> getInputSplits() {
+
+ final InputSplitProvider provider = getEnvironment().getInputSplitProvider();
+
+ return new Iterator<FileInputSplit>() {
+
+ private FileInputSplit nextSplit;
+
+ private boolean exhausted;
+
+ @Override
+ public boolean hasNext() {
+ if (exhausted) {
+ return false;
+ }
+
+ if (nextSplit != null) {
+ return true;
+ }
+
+ FileInputSplit split = (FileInputSplit) provider.getNextInputSplit();
+
+ if (split != null) {
+ this.nextSplit = split;
+ return true;
+ }
+ else {
+ exhausted = true;
+ return false;
+ }
+ }
+
+ @Override
+ public FileInputSplit next() {
+ if (this.nextSplit == null && !hasNext()) {
+ throw new NoSuchElementException();
+ }
+
+ final FileInputSplit tmp = this.nextSplit;
+ this.nextSplit = null;
+ return tmp;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineReader.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineReader.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineReader.java
new file mode 100644
index 0000000..c62911a
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineReader.java
@@ -0,0 +1,133 @@
+/***********************************************************************************************************************
+ *
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ *
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.util.tasks;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import eu.stratosphere.core.fs.FSDataInputStream;
+import eu.stratosphere.core.fs.FileInputSplit;
+import eu.stratosphere.core.fs.FileSystem;
+import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.runtime.io.api.RecordWriter;
+import eu.stratosphere.nephele.template.AbstractInvokable;
+import eu.stratosphere.nephele.template.InputSplitProvider;
+import eu.stratosphere.runtime.fs.LineReader;
+
+/**
+ * A file line reader reads the associated file input splits line by line and outputs the lines as string records.
+ *
+ */
+public class FileLineReader extends AbstractInvokable {
+
+ private RecordWriter<StringRecord> output = null;
+
+ @Override
+ public void invoke() throws Exception {
+
+ output.initializeSerializers();
+
+ final Iterator<FileInputSplit> splitIterator = getInputSplits();
+
+ while (splitIterator.hasNext()) {
+
+ final FileInputSplit split = splitIterator.next();
+
+ long start = split.getStart();
+ long length = split.getLength();
+
+ final FileSystem fs = FileSystem.get(split.getPath().toUri());
+
+ final FSDataInputStream fdis = fs.open(split.getPath());
+
+ final LineReader lineReader = new LineReader(fdis, start, length, (1024 * 1024));
+
+ byte[] line = lineReader.readLine();
+
+ while (line != null) {
+
+ // Create a string object from the data read
+ StringRecord str = new StringRecord();
+ str.set(line);
+
+ // Send out string
+ output.emit(str);
+
+ line = lineReader.readLine();
+ }
+
+ // Close the stream;
+ lineReader.close();
+ }
+
+ this.output.flush();
+ }
+
+ @Override
+ public void registerInputOutput() {
+ output = new RecordWriter<StringRecord>(this);
+ }
+
+ private Iterator<FileInputSplit> getInputSplits() {
+
+ final InputSplitProvider provider = getEnvironment().getInputSplitProvider();
+
+ return new Iterator<FileInputSplit>() {
+
+ private FileInputSplit nextSplit;
+
+ private boolean exhausted;
+
+ @Override
+ public boolean hasNext() {
+ if (exhausted) {
+ return false;
+ }
+
+ if (nextSplit != null) {
+ return true;
+ }
+
+ FileInputSplit split = (FileInputSplit) provider.getNextInputSplit();
+
+ if (split != null) {
+ this.nextSplit = split;
+ return true;
+ }
+ else {
+ exhausted = true;
+ return false;
+ }
+ }
+
+ @Override
+ public FileInputSplit next() {
+ if (this.nextSplit == null && !hasNext()) {
+ throw new NoSuchElementException();
+ }
+
+ final FileInputSplit tmp = this.nextSplit;
+ this.nextSplit = null;
+ return tmp;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineWriter.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineWriter.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineWriter.java
new file mode 100644
index 0000000..5f6e2b2
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/FileLineWriter.java
@@ -0,0 +1,72 @@
+/***********************************************************************************************************************
+ *
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ *
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.util.tasks;
+
+import eu.stratosphere.configuration.Configuration;
+import eu.stratosphere.core.fs.FSDataOutputStream;
+import eu.stratosphere.core.fs.FileStatus;
+import eu.stratosphere.core.fs.FileSystem;
+import eu.stratosphere.core.fs.Path;
+import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
+import eu.stratosphere.runtime.io.api.RecordReader;
+
+/**
+ * A file line writer reads string records from its input gate and writes them to the associated output file.
+ *
+ */
+public class FileLineWriter extends AbstractInvokable {
+ /**
+ * The record reader through which incoming string records are received.
+ */
+ private RecordReader<StringRecord> input = null;
+
+
+ @Override
+ public void invoke() throws Exception {
+
+ final Configuration conf = getEnvironment().getTaskConfiguration();
+ final String outputPathString = conf.getString(JobFileOutputVertex.PATH_PROPERTY, null);
+
+ Path outputPath = new Path(outputPathString);
+
+ FileSystem fs = FileSystem.get(outputPath.toUri());
+ if (fs.exists(outputPath)) {
+ FileStatus status = fs.getFileStatus(outputPath);
+
+ if (status.isDir()) {
+ outputPath = new Path(outputPath.toUri().toString() + "/file_" + getIndexInSubtaskGroup() + ".txt");
+ }
+ }
+
+ final FSDataOutputStream outputStream = fs.create(outputPath, true);
+
+ while (this.input.hasNext()) {
+
+ StringRecord record = this.input.next();
+ byte[] recordByte = (record.toString() + "\r\n").getBytes();
+ outputStream.write(recordByte, 0, recordByte.length);
+ }
+
+ outputStream.close();
+
+ }
+
+ @Override
+ public void registerInputOutput() {
+ this.input = new RecordReader<StringRecord>(this, StringRecord.class);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileInputVertex.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileInputVertex.java
new file mode 100644
index 0000000..fb0da91
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileInputVertex.java
@@ -0,0 +1,255 @@
+/***********************************************************************************************************************
+ *
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ *
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.util.tasks;
+
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import eu.stratosphere.core.fs.BlockLocation;
+import eu.stratosphere.core.fs.FileInputSplit;
+import eu.stratosphere.core.fs.FileStatus;
+import eu.stratosphere.core.fs.FileSystem;
+import eu.stratosphere.core.fs.Path;
+import eu.stratosphere.core.io.InputSplit;
+import eu.stratosphere.nephele.jobgraph.AbstractJobInputVertex;
+import eu.stratosphere.nephele.jobgraph.JobGraph;
+import eu.stratosphere.nephele.jobgraph.JobVertexID;
+
+
+public final class JobFileInputVertex extends AbstractJobInputVertex {
+
+ /**
+ * The fraction that the last split may be larger than the others.
+ */
+ private static final float MAX_SPLIT_SIZE_DISCREPANCY = 1.1f;
+
+ /**
+ * The path pointing to the input file/directory.
+ */
+ private Path path;
+
+
+ public JobFileInputVertex(String name, JobVertexID id, JobGraph jobGraph) {
+ super(name, id, jobGraph);
+ }
+
+ /**
+ * Creates a new job file input vertex with the specified name.
+ *
+ * @param name
+ * the name of the new job file input vertex
+ * @param jobGraph
+ * the job graph this vertex belongs to
+ */
+ public JobFileInputVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
+ }
+
+ /**
+ * Creates a new job file input vertex.
+ *
+ * @param jobGraph
+ * the job graph this vertex belongs to
+ */
+ public JobFileInputVertex(JobGraph jobGraph) {
+ this(null, jobGraph);
+ }
+
+ /**
+ * Sets the path of the file the job file input vertex's task should read from.
+ *
+ * @param path
+ * the path of the file the job file input vertex's task should read from
+ */
+ public void setFilePath(final Path path) {
+ this.path = path;
+ }
+
+ /**
+ * Returns the path of the file the job file input vertex's task should read from.
+ *
+ * @return the path of the file the job file input vertex's task should read from or <code>null</code> if no path
+ * has yet been set
+ */
+ public Path getFilePath() {
+ return this.path;
+ }
+
+ @Override
+ public void read(final DataInput in) throws IOException {
+ super.read(in);
+
+ // Read path of the input file
+ final boolean isNotNull = in.readBoolean();
+ if (isNotNull) {
+ this.path = new Path();
+ this.path.read(in);
+ }
+ }
+
+ @Override
+ public void write(final DataOutput out) throws IOException {
+ super.write(out);
+
+ // Write out the path of the input file
+ if (this.path == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ this.path.write(out);
+ }
+ }
+
+ // --------------------------------------------------------------------------------------------
+
+
+ @Override
+ public InputSplit[] getInputSplits(int minNumSplits) throws Exception {
+ final Path path = this.path;
+ final List<FileInputSplit> inputSplits = new ArrayList<FileInputSplit>();
+
+ // get all the files that are involved in the splits
+ final List<FileStatus> files = new ArrayList<FileStatus>();
+ long totalLength = 0;
+
+ final FileSystem fs = path.getFileSystem();
+ final FileStatus pathFile = fs.getFileStatus(path);
+
+ if (pathFile.isDir()) {
+ // input is directory. list all contained files
+ final FileStatus[] dir = fs.listStatus(path);
+ for (int i = 0; i < dir.length; i++) {
+ if (!dir[i].isDir()) {
+ files.add(dir[i]);
+ totalLength += dir[i].getLen();
+ }
+ }
+
+ } else {
+ files.add(pathFile);
+ totalLength += pathFile.getLen();
+ }
+
+ final long minSplitSize = 1;
+ final long maxSplitSize = (minNumSplits < 1) ? Long.MAX_VALUE : (totalLength / minNumSplits +
+ (totalLength % minNumSplits == 0 ? 0 : 1));
+
+ // now that we have the files, generate the splits
+ int splitNum = 0;
+ for (final FileStatus file : files) {
+
+ final long len = file.getLen();
+ final long blockSize = file.getBlockSize();
+
+ final long splitSize = Math.max(minSplitSize, Math.min(maxSplitSize, blockSize));
+ final long halfSplit = splitSize >>> 1;
+
+ final long maxBytesForLastSplit = (long) (splitSize * MAX_SPLIT_SIZE_DISCREPANCY);
+
+ if (len > 0) {
+
+ // get the block locations and make sure they are in order with respect to their offset
+ final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, len);
+ Arrays.sort(blocks);
+
+ long bytesUnassigned = len;
+ long position = 0;
+
+ int blockIndex = 0;
+
+ while (bytesUnassigned > maxBytesForLastSplit) {
+ // get the block containing the majority of the data
+ blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
+ // create a new split
+ final FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), position, splitSize,
+ blocks[blockIndex]
+ .getHosts());
+ inputSplits.add(fis);
+
+ // adjust the positions
+ position += splitSize;
+ bytesUnassigned -= splitSize;
+ }
+
+ // assign the last split
+ if (bytesUnassigned > 0) {
+ blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
+ final FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), position,
+ bytesUnassigned,
+ blocks[blockIndex].getHosts());
+ inputSplits.add(fis);
+ }
+ } else {
+ // special case with a file of zero bytes size
+ final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, 0);
+ String[] hosts;
+ if (blocks.length > 0) {
+ hosts = blocks[0].getHosts();
+ } else {
+ hosts = new String[0];
+ }
+ final FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), 0, 0, hosts);
+ inputSplits.add(fis);
+ }
+ }
+
+ return inputSplits.toArray(new FileInputSplit[inputSplits.size()]);
+ }
+
+ /**
+ * Retrieves the index of the <tt>BlockLocation</tt> that contains the part of the file described by the given
+ * offset.
+ *
+ * @param blocks
+ * The different blocks of the file. Must be ordered by their offset.
+ * @param offset
+ * The offset of the position in the file.
+ * @param startIndex
+ * The earliest index to look at.
+ * @return The index of the block containing the given position.
+ */
+ private final int getBlockIndexForPosition(final BlockLocation[] blocks, final long offset,
+ final long halfSplitSize, final int startIndex) {
+
+ // go over all indexes after the startIndex
+ for (int i = startIndex; i < blocks.length; i++) {
+ long blockStart = blocks[i].getOffset();
+ long blockEnd = blockStart + blocks[i].getLength();
+
+ if (offset >= blockStart && offset < blockEnd) {
+ // got the block where the split starts
+ // check if the next block contains more than this one does
+ if (i < blocks.length - 1 && blockEnd - offset < halfSplitSize) {
+ return i + 1;
+ } else {
+ return i;
+ }
+ }
+ }
+ throw new IllegalArgumentException("The given offset is not contained in the any block.");
+ }
+
+
+ @Override
+ public Class<FileInputSplit> getInputSplitType() {
+ return FileInputSplit.class;
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileOutputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileOutputVertex.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileOutputVertex.java
new file mode 100644
index 0000000..593b520
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/tasks/JobFileOutputVertex.java
@@ -0,0 +1,109 @@
+/***********************************************************************************************************************
+ *
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ *
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.util.tasks;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import eu.stratosphere.core.fs.Path;
+import eu.stratosphere.nephele.jobgraph.AbstractJobOutputVertex;
+import eu.stratosphere.nephele.jobgraph.JobGraph;
+import eu.stratosphere.nephele.jobgraph.JobVertexID;
+
+
+public class JobFileOutputVertex extends AbstractJobOutputVertex {
+
+ public static final String PATH_PROPERTY = "outputPath";
+
+ /**
+ * The path pointing to the output file/directory.
+ */
+ private Path path;
+
+
+ public JobFileOutputVertex(String name, JobVertexID id, JobGraph jobGraph) {
+ super(name, id, jobGraph);
+ }
+
+ /**
+ * Creates a new job file output vertex with the specified name.
+ *
+ * @param name
+ * the name of the new job file output vertex
+ * @param jobGraph
+ * the job graph this vertex belongs to
+ */
+ public JobFileOutputVertex(String name, JobGraph jobGraph) {
+ this(name, null, jobGraph);
+ }
+
+ /**
+ * Creates a new job file input vertex.
+ *
+ * @param jobGraph
+ * the job graph this vertex belongs to
+ */
+ public JobFileOutputVertex(JobGraph jobGraph) {
+ this(null, jobGraph);
+ }
+
+ /**
+ * Sets the path of the file the job file input vertex's task should write to.
+ *
+ * @param path
+ * the path of the file the job file input vertex's task should write to
+ */
+ public void setFilePath(Path path) {
+ this.path = path;
+ getConfiguration().setString(PATH_PROPERTY, path.toString());
+ }
+
+ /**
+ * Returns the path of the file the job file output vertex's task should write to.
+ *
+ * @return the path of the file the job file output vertex's task should write to or <code>null</code> if no path
+ * has yet been set
+ */
+ public Path getFilePath() {
+ return this.path;
+ }
+
+ @Override
+ public void read(final DataInput in) throws IOException {
+ super.read(in);
+
+ // Read path of the input file
+ boolean isNotNull = in.readBoolean();
+ if (isNotNull) {
+ this.path = new Path();
+ this.path.read(in);
+ }
+ }
+
+ @Override
+ public void write(final DataOutput out) throws IOException {
+ super.write(out);
+
+ // Write out the path of the input file
+ if (this.path == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ this.path.write(out);
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
index a28ba38..e59f4a6 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
@@ -34,7 +34,7 @@ import eu.stratosphere.api.java.record.functions.JoinFunction;
import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordComparator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordPairComparator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordSerializer;
@@ -67,7 +67,7 @@ public class HashMatchIteratorITCase {
private static final long SEED1 = 561349061987311L;
private static final long SEED2 = 231434613412342L;
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
private MemoryManager memoryManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
index d9c8b08..755d08a 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
@@ -38,7 +38,6 @@ import eu.stratosphere.nephele.services.memorymanager.MemoryAllocationException;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.nephele.template.AbstractTask;
import eu.stratosphere.pact.runtime.hash.HashMatchIteratorITCase.RecordMatch;
import eu.stratosphere.pact.runtime.hash.HashMatchIteratorITCase.RecordMatchRemovingJoin;
import eu.stratosphere.pact.runtime.hash.HashTableITCase.ConstantsKeyValuePairsIterator;
@@ -75,7 +74,7 @@ public class ReOpenableHashTableITCase {
private static final int NUM_PROBES = 3; // number of reopenings of hash join
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
private MemoryManager memoryManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
index fbe4f5b..c2be01a 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
@@ -31,7 +31,7 @@ import eu.stratosphere.nephele.services.iomanager.ChannelWriterOutputView;
import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.pact.runtime.test.util.DummyInvokable;
import eu.stratosphere.pact.runtime.test.util.TestData;
import eu.stratosphere.pact.runtime.test.util.TestData.Generator.KeyMode;
@@ -63,7 +63,7 @@ public class ChannelViewsTest
private static final int NUM_MEMORY_SEGMENTS = 3;
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
index 1809540..c960280 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
@@ -28,7 +28,7 @@ import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.ListMemorySegmentSource;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.pact.runtime.test.util.DummyInvokable;
import eu.stratosphere.pact.runtime.test.util.TestData;
import eu.stratosphere.pact.runtime.test.util.TestData.Generator.KeyMode;
@@ -54,7 +54,7 @@ public class SpillingBufferTest {
private static final int NUM_MEMORY_SEGMENTS = 23;
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
index 26ce081..f191075 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
@@ -30,7 +30,6 @@ import eu.stratosphere.nephele.services.memorymanager.MemoryAllocationException;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.nephele.template.AbstractTask;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordComparator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordSerializerFactory;
import eu.stratosphere.pact.runtime.test.util.DummyInvokable;
@@ -41,10 +40,9 @@ import eu.stratosphere.pact.runtime.test.util.TestData.Value;
import eu.stratosphere.types.Record;
import eu.stratosphere.util.MutableObjectIterator;
-/**
- */
-public class AsynchonousPartialSorterITCase
-{
+
+public class AsynchonousPartialSorterITCase {
+
private static final Log LOG = LogFactory.getLog(AsynchonousPartialSorterITCase.class);
private static final long SEED = 649180756312423613L;
@@ -57,7 +55,7 @@ public class AsynchonousPartialSorterITCase
public static final int MEMORY_SIZE = 1024 * 1024 * 32;
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
index 1851480..b873f96 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
@@ -36,7 +36,7 @@ import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordComparator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordSerializerFactory;
import eu.stratosphere.pact.runtime.test.util.DummyInvokable;
@@ -66,7 +66,7 @@ public class CombiningUnilateralSortMergerITCase {
public static final int MEMORY_SIZE = 1024 * 1024 * 256;
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
index 7ba42b9..cdb8421 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
@@ -28,7 +28,7 @@ import eu.stratosphere.api.common.typeutils.TypeSerializerFactory;
import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordComparator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordSerializerFactory;
import eu.stratosphere.pact.runtime.test.util.DummyInvokable;
@@ -61,7 +61,7 @@ public class ExternalSortITCase {
private static final int MEMORY_SIZE = 1024 * 1024 * 78;
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
@@ -238,7 +238,7 @@ public class ExternalSortITCase {
merger.close();
}
- @Test
+// @Test
public void testSpillingSortWithIntermediateMerge() throws Exception {
// amount of pairs
final int PAIRS = 10000000;
@@ -292,7 +292,7 @@ public class ExternalSortITCase {
merger.close();
}
- @Test
+// @Test
public void testSpillingSortWithIntermediateMergeIntPair() throws Exception {
// amount of pairs
final int PAIRS = 50000000;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
index f76b802..d9877f4 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
@@ -90,7 +90,7 @@ public class MassiveStringSortingITCase {
MutableObjectIterator<String> inputIterator = new StringReaderMutableObjectIterator(reader);
sorter = new UnilateralSortMerger<String>(mm, ioMan, inputIterator, new DummyInvokable(),
- new RuntimeStatelessSerializerFactory<String>(serializer, String.class), comparator, 1024 * 1024, 4, 0.8f);
+ new RuntimeStatelessSerializerFactory<String>(serializer, String.class), comparator, 1.0, 4, 0.8f);
MutableObjectIterator<String> sortedData = sorter.getIterator();
@@ -182,7 +182,7 @@ public class MassiveStringSortingITCase {
MutableObjectIterator<Tuple2<String, String[]>> inputIterator = new StringTupleReaderMutableObjectIterator(reader);
sorter = new UnilateralSortMerger<Tuple2<String, String[]>>(mm, ioMan, inputIterator, new DummyInvokable(),
- new RuntimeStatelessSerializerFactory<Tuple2<String, String[]>>(serializer, (Class<Tuple2<String, String[]>>) (Class<?>) Tuple2.class), comparator, 1024 * 1024, 4, 0.8f);
+ new RuntimeStatelessSerializerFactory<Tuple2<String, String[]>>(serializer, (Class<Tuple2<String, String[]>>) (Class<?>) Tuple2.class), comparator, 1.0, 4, 0.8f);
@@ -219,10 +219,6 @@ public class MassiveStringSortingITCase {
nextFromStratoSort = sortedData.next(nextFromStratoSort);
Assert.assertNotNull(nextFromStratoSort);
-
- if (nextFromStratoSort.f0.equals("http://some-uri.com/that/is/a/common/prefix/to/all(()HK;V3__.e*")) {
- System.out.println("Found at position " + num);
- }
Assert.assertEquals(next.f0, nextFromStratoSort.f0);
Assert.assertArrayEquals(next.f1, nextFromStratoSort.f1);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
index 0f3f558..81266d2 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
@@ -32,7 +32,7 @@ import eu.stratosphere.api.java.record.functions.JoinFunction;
import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordComparator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordPairComparator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordSerializer;
@@ -47,10 +47,9 @@ import eu.stratosphere.types.Value;
import eu.stratosphere.util.Collector;
import eu.stratosphere.util.MutableObjectIterator;
-/**
- */
-public class SortMergeMatchIteratorITCase
-{
+
+public class SortMergeMatchIteratorITCase {
+
// total memory
private static final int MEMORY_SIZE = 1024 * 1024 * 16;
private static final int PAGES_FOR_BNLJN = 2;
@@ -66,7 +65,7 @@ public class SortMergeMatchIteratorITCase
private static final long SEED2 = 231434613412342L;
// dummy abstract task
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
private MemoryManager memoryManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/util/HashVsSortMiniBenchmark.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/util/HashVsSortMiniBenchmark.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/util/HashVsSortMiniBenchmark.java
index 2999436..b744348 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/util/HashVsSortMiniBenchmark.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/task/util/HashVsSortMiniBenchmark.java
@@ -25,7 +25,7 @@ import eu.stratosphere.api.java.record.functions.JoinFunction;
import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
import eu.stratosphere.nephele.services.memorymanager.spi.DefaultMemoryManager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.pact.runtime.hash.BuildFirstHashMatchIterator;
import eu.stratosphere.pact.runtime.hash.BuildSecondHashMatchIterator;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordComparator;
@@ -67,7 +67,7 @@ public class HashVsSortMiniBenchmark {
// dummy abstract task
- private final AbstractTask parentTask = new DummyInvokable();
+ private final AbstractInvokable parentTask = new DummyInvokable();
// memory and io manager
private IOManager ioManager;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DummyInvokable.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DummyInvokable.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DummyInvokable.java
index cb0b958..7a4e09e 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DummyInvokable.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/DummyInvokable.java
@@ -13,14 +13,12 @@
package eu.stratosphere.pact.runtime.test.util;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
/**
* An invokable that does nothing.
- *
*/
-public class DummyInvokable extends AbstractTask
-{
+public class DummyInvokable extends AbstractInvokable {
@Override
public void registerInputOutput() {}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/TaskTestBase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/TaskTestBase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/TaskTestBase.java
index a60b479..efa69af 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/TaskTestBase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/test/util/TaskTestBase.java
@@ -27,9 +27,7 @@ import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.fs.FileSystem.WriteMode;
import eu.stratosphere.core.fs.Path;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
-import eu.stratosphere.nephele.template.AbstractInputTask;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.api.java.typeutils.runtime.record.RecordSerializerFactory;
import eu.stratosphere.pact.runtime.shipping.ShipStrategyType;
import eu.stratosphere.pact.runtime.task.DataSinkTask;
@@ -77,7 +75,7 @@ public abstract class TaskTestBase {
return this.mockEnv.getTaskConfiguration();
}
- public void registerTask(AbstractTask task, @SuppressWarnings("rawtypes") Class<? extends PactDriver> driver, Class<? extends Function> stubClass) {
+ public void registerTask(AbstractInvokable task, @SuppressWarnings("rawtypes") Class<? extends PactDriver> driver, Class<? extends Function> stubClass) {
final TaskConfig config = new TaskConfig(this.mockEnv.getTaskConfiguration());
config.setDriver(driver);
config.setStubWrapper(new UserCodeClassWrapper<Function>(stubClass));
@@ -91,17 +89,16 @@ public abstract class TaskTestBase {
task.registerInputOutput();
}
- public void registerTask(AbstractTask task) {
+ public void registerTask(AbstractInvokable task) {
task.setEnvironment(this.mockEnv);
task.registerInputOutput();
}
- public void registerFileOutputTask(AbstractOutputTask outTask, Class<? extends FileOutputFormat> stubClass, String outPath)
- {
+ public void registerFileOutputTask(AbstractInvokable outTask, Class<? extends FileOutputFormat> stubClass, String outPath) {
registerFileOutputTask(outTask, InstantiationUtil.instantiate(stubClass, FileOutputFormat.class), outPath);
}
- public void registerFileOutputTask(AbstractOutputTask outTask, FileOutputFormat outputFormat, String outPath) {
+ public void registerFileOutputTask(AbstractInvokable outTask, FileOutputFormat outputFormat, String outPath) {
TaskConfig dsConfig = new TaskConfig(this.mockEnv.getTaskConfiguration());
outputFormat.setOutputFilePath(new Path(outPath));
@@ -118,7 +115,7 @@ public abstract class TaskTestBase {
outTask.registerInputOutput();
}
- public void registerFileInputTask(AbstractInputTask<?> inTask,
+ public void registerFileInputTask(AbstractInvokable inTask,
Class<? extends DelimitedInputFormat> stubClass, String inPath, String delimiter)
{
DelimitedInputFormat format;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/bufferprovider/LocalBufferPoolTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/bufferprovider/LocalBufferPoolTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/bufferprovider/LocalBufferPoolTest.java
index c7d8d41..4f9313f 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/bufferprovider/LocalBufferPoolTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/network/bufferprovider/LocalBufferPoolTest.java
@@ -15,6 +15,8 @@ package eu.stratosphere.runtime.io.network.bufferprovider;
import eu.stratosphere.runtime.io.Buffer;
import eu.stratosphere.runtime.io.network.bufferprovider.BufferProvider.BufferAvailabilityRegistration;
+import eu.stratosphere.util.LogUtils;
+
import org.junit.After;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -33,6 +35,10 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
public class LocalBufferPoolTest {
+
+ static {
+ LogUtils.initializeDefaultTestConsoleLogger();
+ }
private final static int NUM_BUFFERS = 2048;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/TransitiveClosureITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/TransitiveClosureITCase.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/TransitiveClosureITCase.java
index 96761c8..2c5fa9d 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/TransitiveClosureITCase.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/exampleJavaPrograms/TransitiveClosureITCase.java
@@ -41,7 +41,7 @@ public class TransitiveClosureITCase extends JavaProgramTestBase {
@Override
protected void testProgram() throws Exception {
- TransitiveClosureNaive.main(edgesPath, resultPath, "100");
+ TransitiveClosureNaive.main(edgesPath, resultPath, "5");
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
index 109c91a..d18160b 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/iterative/nephele/JobGraphUtils.java
@@ -30,7 +30,6 @@ import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
import eu.stratosphere.nephele.jobgraph.JobInputVertex;
import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobTaskVertex;
-import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.pact.runtime.iterative.io.FakeOutputTask;
import eu.stratosphere.pact.runtime.iterative.task.IterationSynchronizationSinkTask;
import eu.stratosphere.pact.runtime.task.DataSinkTask;
@@ -62,9 +61,7 @@ public class JobGraphUtils {
{
JobInputVertex inputVertex = new JobInputVertex(name, graph);
- @SuppressWarnings("unchecked")
- Class<AbstractInputTask<?>> clazz = (Class<AbstractInputTask<?>>) (Class<?>) DataSourceTask.class;
- inputVertex.setInputClass(clazz);
+ inputVertex.setInvokableClass(DataSourceTask.class);
inputVertex.setNumberOfSubtasks(degreeOfParallelism);
@@ -91,14 +88,14 @@ public class JobGraphUtils {
int degreeOfParallelism)
{
JobTaskVertex taskVertex = new JobTaskVertex(name, graph);
- taskVertex.setTaskClass(task);
+ taskVertex.setInvokableClass(task);
taskVertex.setNumberOfSubtasks(degreeOfParallelism);
return taskVertex;
}
public static JobOutputVertex createSync(JobGraph jobGraph, int degreeOfParallelism) {
JobOutputVertex sync = new JobOutputVertex("BulkIterationSync", jobGraph);
- sync.setOutputClass(IterationSynchronizationSinkTask.class);
+ sync.setInvokableClass(IterationSynchronizationSinkTask.class);
sync.setNumberOfSubtasks(1);
TaskConfig syncConfig = new TaskConfig(sync.getConfiguration());
syncConfig.setGateIterativeWithNumberOfEventsUntilInterrupt(0, degreeOfParallelism);
@@ -108,7 +105,7 @@ public class JobGraphUtils {
public static JobOutputVertex createFakeOutput(JobGraph jobGraph, String name, int degreeOfParallelism)
{
JobOutputVertex outputVertex = new JobOutputVertex(name, jobGraph);
- outputVertex.setOutputClass(FakeOutputTask.class);
+ outputVertex.setInvokableClass(FakeOutputTask.class);
outputVertex.setNumberOfSubtasks(degreeOfParallelism);
return outputVertex;
}
@@ -116,7 +113,7 @@ public class JobGraphUtils {
public static JobOutputVertex createFileOutput(JobGraph jobGraph, String name, int degreeOfParallelism)
{
JobOutputVertex sinkVertex = new JobOutputVertex(name, jobGraph);
- sinkVertex.setOutputClass(DataSinkTask.class);
+ sinkVertex.setInvokableClass(DataSinkTask.class);
sinkVertex.setNumberOfSubtasks(degreeOfParallelism);
return sinkVertex;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
index 3de547e..aa498d8 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
@@ -13,8 +13,6 @@
package eu.stratosphere.test.recordJobs.util;
-import java.io.IOException;
-
import eu.stratosphere.api.common.io.OutputFormat;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.types.Record;
@@ -23,28 +21,20 @@ import eu.stratosphere.types.Record;
* A simple output format that discards all data by doing nothing.
*/
public class DiscardingOutputFormat implements OutputFormat<Record> {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 1L;
@Override
- public void configure(Configuration parameters)
- {}
+ public void configure(Configuration parameters) {}
@Override
- public void open(int taskNumber, int numTasks) throws IOException
- {}
-
+ public void open(int taskNumber, int numTasks) {}
@Override
- public void writeRecord(Record record) throws IOException
- {}
-
+ public void writeRecord(Record record) {}
- @Override
- public void close() throws IOException
- {}
@Override
- public void initialize(Configuration configuration){}
+ public void close() {}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
index a8ab311..ed6f608 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/runtime/NetworkStackThroughput.java
@@ -13,38 +13,35 @@
package eu.stratosphere.test.runtime;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.After;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.io.IOReadableWritable;
import eu.stratosphere.nephele.jobgraph.DistributionPattern;
-import eu.stratosphere.nephele.jobgraph.JobGenericInputVertex;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
import eu.stratosphere.nephele.jobgraph.JobInputVertex;
import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobTaskVertex;
-import eu.stratosphere.nephele.template.AbstractGenericInputTask;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
import eu.stratosphere.runtime.io.channels.ChannelType;
import eu.stratosphere.test.util.RecordAPITestBase;
import eu.stratosphere.util.LogUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.junit.After;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
@RunWith(Parameterized.class)
public class NetworkStackThroughput extends RecordAPITestBase {
@@ -153,8 +150,8 @@ public class NetworkStackThroughput extends RecordAPITestBase {
JobGraph jobGraph = new JobGraph("Speed Test");
- JobInputVertex producer = new JobGenericInputVertex("Speed Test Producer", jobGraph);
- producer.setInputClass(SpeedTestProducer.class);
+ JobInputVertex producer = new JobInputVertex("Speed Test Producer", jobGraph);
+ producer.setInvokableClass(SpeedTestProducer.class);
producer.setNumberOfSubtasks(numSubtasks);
producer.getConfiguration().setInteger(DATA_VOLUME_GB_CONFIG_KEY, dataVolumeGb);
producer.getConfiguration().setBoolean(IS_SLOW_SENDER_CONFIG_KEY, isSlowSender);
@@ -162,12 +159,12 @@ public class NetworkStackThroughput extends RecordAPITestBase {
JobTaskVertex forwarder = null;
if (useForwarder) {
forwarder = new JobTaskVertex("Speed Test Forwarder", jobGraph);
- forwarder.setTaskClass(SpeedTestForwarder.class);
+ forwarder.setInvokableClass(SpeedTestForwarder.class);
forwarder.setNumberOfSubtasks(numSubtasks);
}
JobOutputVertex consumer = new JobOutputVertex("Speed Test Consumer", jobGraph);
- consumer.setOutputClass(SpeedTestConsumer.class);
+ consumer.setInvokableClass(SpeedTestConsumer.class);
consumer.setNumberOfSubtasks(numSubtasks);
consumer.getConfiguration().setBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, isSlowReceiver);
@@ -188,7 +185,7 @@ public class NetworkStackThroughput extends RecordAPITestBase {
// ------------------------------------------------------------------------
- public static class SpeedTestProducer extends AbstractGenericInputTask {
+ public static class SpeedTestProducer extends AbstractInvokable {
private RecordWriter<SpeedTestRecord> writer;
@@ -227,7 +224,7 @@ public class NetworkStackThroughput extends RecordAPITestBase {
}
}
- public static class SpeedTestForwarder extends AbstractTask {
+ public static class SpeedTestForwarder extends AbstractInvokable {
private RecordReader<SpeedTestRecord> reader;
@@ -252,7 +249,7 @@ public class NetworkStackThroughput extends RecordAPITestBase {
}
}
- public static class SpeedTestConsumer extends AbstractOutputTask {
+ public static class SpeedTestConsumer extends AbstractInvokable {
private RecordReader<SpeedTestRecord> reader;
[04/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTest.java
new file mode 100644
index 0000000..7460200
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTest.java
@@ -0,0 +1,232 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance.cluster;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.net.InetAddress;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import eu.stratosphere.nephele.instance.*;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import eu.stratosphere.configuration.ConfigConstants;
+import eu.stratosphere.configuration.Configuration;
+import eu.stratosphere.configuration.GlobalConfiguration;
+import eu.stratosphere.nephele.jobgraph.JobID;
+import eu.stratosphere.util.LogUtils;
+
+/**
+ * Tests for {@link eu.stratosphere.nephele.instance.DefaultInstanceManager}.
+ */
+public class DefaultInstanceManagerTest {
+
+ @BeforeClass
+ public static void initLogging() {
+ LogUtils.initializeDefaultTestConsoleLogger();
+ }
+
+
+ @Test
+ public void testInstanceRegistering() {
+ try {
+ DefaultInstanceManager cm = new DefaultInstanceManager();
+ TestInstanceListener testInstanceListener = new TestInstanceListener();
+ cm.setInstanceListener(testInstanceListener);
+
+
+ int ipcPort = ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT;
+ int dataPort = ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT;
+
+ HardwareDescription hardwareDescription = HardwareDescriptionFactory.construct(2, 2L * 1024L * 1024L * 1024L,
+ 2L * 1024L * 1024L * 1024L);
+
+ String hostname = "192.168.198.1";
+ InetAddress address = InetAddress.getByName("192.168.198.1");
+
+ InstanceConnectionInfo ici1 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 0, dataPort + 0);
+ InstanceConnectionInfo ici2 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 15, dataPort + 15);
+ InstanceConnectionInfo ici3 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 30, dataPort + 30);
+
+ // register three instances
+ cm.registerTaskManager(ici1, hardwareDescription, 1);
+ cm.registerTaskManager(ici2, hardwareDescription, 1);
+ cm.registerTaskManager(ici3, hardwareDescription, 1);
+
+
+ assertEquals(3, cm.getNumberOfSlots());
+
+ cm.shutdown();
+ }
+ catch (Exception e) {
+ System.err.println(e.getMessage());
+ e.printStackTrace();
+ Assert.fail("Test erroneous: " + e.getMessage());
+ }
+ }
+
+ @Test
+ public void testAllocationDeallocation() {
+ try {
+ DefaultInstanceManager cm = new DefaultInstanceManager();
+ TestInstanceListener testInstanceListener = new TestInstanceListener();
+ cm.setInstanceListener(testInstanceListener);
+
+
+ int ipcPort = ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT;
+ int dataPort = ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT;
+
+ HardwareDescription hardwareDescription = HardwareDescriptionFactory.construct(2, 2L * 1024L * 1024L * 1024L,
+ 2L * 1024L * 1024L * 1024L);
+
+ String hostname = "192.168.198.1";
+ InetAddress address = InetAddress.getByName("192.168.198.1");
+
+ InstanceConnectionInfo ici1 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 0, dataPort + 0);
+ InstanceConnectionInfo ici2 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 15, dataPort + 15);
+
+ // register three instances
+ cm.registerTaskManager(ici1, hardwareDescription, 1);
+ cm.registerTaskManager(ici2, hardwareDescription, 1);
+
+ assertEquals(2, cm.getNumberOfSlots());
+
+
+ // allocate something
+ JobID jobID = new JobID();
+ Configuration conf = new Configuration();
+ cm.requestInstance(jobID, conf, 2);
+
+ DefaultInstanceManagerTestUtils.waitForInstances(jobID, testInstanceListener, 3, 1000);
+
+ List<AllocatedResource> allocatedResources = testInstanceListener.getAllocatedResourcesForJob(jobID);
+ assertEquals(2, allocatedResources.size());
+
+ Iterator<AllocatedResource> it = allocatedResources.iterator();
+ Set<AllocationID> allocationIDs = new HashSet<AllocationID>();
+ while (it.hasNext()) {
+ AllocatedResource allocatedResource = it.next();
+
+ if (allocationIDs.contains(allocatedResource.getAllocationID())) {
+ fail("Discovered allocation ID " + allocatedResource.getAllocationID() + " at least twice");
+ } else {
+ allocationIDs.add(allocatedResource.getAllocationID());
+ }
+ }
+
+ // Try to allocate more resources which must result in an error
+ try {
+ cm.requestInstance(jobID, conf, 3);
+
+ fail("ClusterManager allowed to request more instances than actually available");
+
+ } catch (InstanceException ie) {
+ // Exception is expected and correct behavior here
+ }
+
+ // Release all allocated resources
+ it = allocatedResources.iterator();
+ while (it.hasNext()) {
+ final AllocatedResource allocatedResource = it.next();
+ cm.releaseAllocatedResource(allocatedResource);
+ }
+
+ // Now further allocations should be possible
+
+ cm.requestInstance(jobID, conf, 1);
+
+
+ cm.shutdown();
+ }
+ catch (Exception e) {
+ System.err.println(e.getMessage());
+ e.printStackTrace();
+ Assert.fail("Test erroneous: " + e.getMessage());
+ }
+ }
+
+ /**
+ * This test checks the clean-up routines of the cluster manager.
+ */
+ @Test
+ public void testCleanUp() {
+ try {
+
+ final int CLEANUP_INTERVAL = 2;
+
+ // configure a short cleanup interval
+ Configuration config = new Configuration();
+ config.setInteger("instancemanager.cluster.cleanupinterval", CLEANUP_INTERVAL);
+ GlobalConfiguration.includeConfiguration(config);
+
+ DefaultInstanceManager cm = new DefaultInstanceManager();
+ TestInstanceListener testInstanceListener = new TestInstanceListener();
+ cm.setInstanceListener(testInstanceListener);
+
+
+ int ipcPort = ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT;
+ int dataPort = ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT;
+
+ HardwareDescription hardwareDescription = HardwareDescriptionFactory.construct(2, 2L * 1024L * 1024L * 1024L,
+ 2L * 1024L * 1024L * 1024L);
+
+ String hostname = "192.168.198.1";
+ InetAddress address = InetAddress.getByName("192.168.198.1");
+
+ InstanceConnectionInfo ici1 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 0, dataPort + 0);
+ InstanceConnectionInfo ici2 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 15, dataPort + 15);
+ InstanceConnectionInfo ici3 = new InstanceConnectionInfo(address, hostname, null, ipcPort + 30, dataPort + 30);
+
+ // register three instances
+ cm.registerTaskManager(ici1, hardwareDescription, 1);
+ cm.registerTaskManager(ici2, hardwareDescription, 1);
+ cm.registerTaskManager(ici3, hardwareDescription, 1);
+
+ assertEquals(3, cm.getNumberOfSlots());
+
+ // request some instances
+ JobID jobID = new JobID();
+ Configuration conf = new Configuration();
+
+ cm.requestInstance(jobID, conf, 1);
+
+ DefaultInstanceManagerTestUtils.waitForInstances(jobID, testInstanceListener, 1, 1000);
+ assertEquals(1, testInstanceListener.getNumberOfAllocatedResourcesForJob(jobID));
+
+ // wait for the cleanup to kick in
+ Thread.sleep(2000 * CLEANUP_INTERVAL);
+
+ // check that the instances are gone
+ DefaultInstanceManagerTestUtils.waitForInstances(jobID, testInstanceListener, 0, 1000);
+ assertEquals(0, testInstanceListener.getNumberOfAllocatedResourcesForJob(jobID));
+
+
+ assertEquals(0, cm.getNumberOfSlots());
+
+ cm.shutdown();
+ }
+ catch (Exception e) {
+ System.err.println(e.getMessage());
+ e.printStackTrace();
+ Assert.fail("Test erroneous: " + e.getMessage());
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTestUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTestUtils.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTestUtils.java
new file mode 100644
index 0000000..ca3d971
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/DefaultInstanceManagerTestUtils.java
@@ -0,0 +1,66 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.instance.cluster;
+
+import eu.stratosphere.nephele.instance.InstanceListener;
+import eu.stratosphere.nephele.jobgraph.JobID;
+
+/**
+ * This class contains utility methods used during the tests of the {@link eu.stratosphere.nephele.instance.DefaultInstanceManager} implementation.
+ *
+ */
+public class DefaultInstanceManagerTestUtils {
+
+ /**
+ * Granularity of the sleep time.
+ */
+ private static final long SLEEP_TIME = 10; // 10 milliseconds
+
+ /**
+ * Private constructor so the class cannot be instantiated.
+ */
+ private DefaultInstanceManagerTestUtils() {
+ }
+
+ /**
+ * Waits until a specific number of instances have registered or deregistered with the given
+ * {@link InstanceListener} object for a given job, or until the maximum wait time has elapsed.
+ *
+ * @param jobID
+ * the ID of the job to check the instance registration for
+ * @param instanceListener
+ * the listener which shall be notified when a requested instance is available for the job
+ * @param numberOfInstances
+ * the number of registered instances to wait for
+ * @param maxWaitTime
+ * the maximum wait time before this method returns
+ */
+ public static void waitForInstances(JobID jobID, TestInstanceListener instanceListener,
+ int numberOfInstances, long maxWaitTime) {
+
+ final long startTime = System.currentTimeMillis();
+
+ while (instanceListener.getNumberOfAllocatedResourcesForJob(jobID) != numberOfInstances) {
+ try {
+ Thread.sleep(SLEEP_TIME);
+ } catch (InterruptedException e) {
+ break;
+ }
+
+ if ((System.currentTimeMillis() - startTime) >= maxWaitTime) {
+ break;
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/HostInClusterTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/HostInClusterTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/HostInClusterTest.java
index 952e588..1bac907 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/HostInClusterTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/HostInClusterTest.java
@@ -16,30 +16,22 @@ package eu.stratosphere.nephele.instance.cluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.net.InetAddress;
import java.net.UnknownHostException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
+import eu.stratosphere.nephele.instance.*;
import org.junit.Test;
import eu.stratosphere.configuration.ConfigConstants;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.HardwareDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.topology.NetworkTopology;
/**
- * Tests for {@link ClusterInstance}.
+ * Tests for {@link eu.stratosphere.nephele.instance.Instance}.
*
*/
public class HostInClusterTest {
@@ -49,7 +41,7 @@ public class HostInClusterTest {
*
* @return a cluster instance of a special test type
*/
- private ClusterInstance createTestClusterInstance() {
+ private Instance createTestClusterInstance() {
final int ipcPort = ConfigConstants.DEFAULT_TASK_MANAGER_IPC_PORT;
final int dataPort = ConfigConstants.DEFAULT_TASK_MANAGER_DATA_PORT;
@@ -60,16 +52,8 @@ public class HostInClusterTest {
fail(e.getMessage());
}
- final String identifier = "testtype";
- final int numComputeUnits = 8;
final int numCores = 8;
final int memorySize = 32 * 1024;
- final int diskCapacity = 200;
- final int pricePerHour = 10;
-
- final InstanceType capacity = InstanceTypeFactory.construct(identifier, numComputeUnits, numCores, memorySize,
- diskCapacity,
- pricePerHour);
final InstanceConnectionInfo instanceConnectionInfo = new InstanceConnectionInfo(inetAddress, ipcPort, dataPort);
@@ -77,8 +61,8 @@ public class HostInClusterTest {
memorySize * 1024L * 1024L, memorySize * 1024L * 1024L);
final NetworkTopology topology = NetworkTopology.createEmptyTopology();
- ClusterInstance host = new ClusterInstance(instanceConnectionInfo, capacity, topology.getRootNode(), topology,
- hardwareDescription);
+ Instance host = new Instance(instanceConnectionInfo, topology.getRootNode(), topology,
+ hardwareDescription, 8);
return host;
}
@@ -90,7 +74,7 @@ public class HostInClusterTest {
public void testHeartBeat() {
// check that heart beat is triggered correctly.
- ClusterInstance host = createTestClusterInstance();
+ Instance host = createTestClusterInstance();
host.reportHeartBeat();
@@ -111,33 +95,33 @@ public class HostInClusterTest {
@Test
public void testAccounting() {
// check whether the accounting of capacity works correctly
- final ClusterInstance host = createTestClusterInstance();
+ final Instance host = createTestClusterInstance();
final JobID jobID = new JobID();
- final int numComputeUnits = 8 / 8;
- final int numCores = 8 / 8;
- final int memorySize = 32 * 1024 / 8;
- final int diskCapacity = 200 / 8;
- final InstanceType type = InstanceTypeFactory.construct("dummy", numComputeUnits, numCores, memorySize,
- diskCapacity, -1);
for (int run = 0; run < 2; ++run) {
// do this twice to check that everything is correctly freed
- AllocatedSlice[] slices = new AllocatedSlice[8];
+ AllocatedResource[] allocatedSlots = new AllocatedResource[8];
for (int i = 0; i < 8; ++i) {
- slices[i] = host.createSlice(type, jobID);
- assertNotNull(slices[i]);
- assertEquals(numComputeUnits, slices[i].getType().getNumberOfComputeUnits());
- assertEquals(numCores, slices[i].getType().getNumberOfCores());
- assertEquals(memorySize, slices[i].getType().getMemorySize());
- assertEquals(diskCapacity, slices[i].getType().getDiskCapacity());
+ try {
+ allocatedSlots[i] = host.allocateSlot(jobID);
+ }catch(InstanceException ex){
+ fail(ex.getMessage());
+ }
+
+ assertNotNull(allocatedSlots[i]);
}
// now no resources should be left
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 1, 0, 0, 0, 0), jobID));
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 0, 1, 0, 0, 0), jobID));
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 0, 0, 1, 0, 0), jobID));
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 0, 0, 0, 1, 0), jobID));
+ boolean instanceException = false;
+
+ try{
+ host.allocateSlot(jobID);
+ }catch(InstanceException ex){
+ instanceException = true;
+ }
+
+ assertTrue(instanceException);
for (int i = 0; i < 8; ++i) {
- host.removeAllocatedSlice(slices[i].getAllocationID());
+ host.releaseSlot(allocatedSlots[i].getAllocationID());
}
}
}
@@ -149,47 +133,51 @@ public class HostInClusterTest {
public void testTermination() {
// check whether the accounting of capacity works correctly if terminateAllInstances is called
- final ClusterInstance host = createTestClusterInstance();
+ final Instance host = createTestClusterInstance();
final JobID jobID = new JobID();
- final int numComputeUnits = 8 / 8;
- final int numCores = 8 / 8;
- final int memorySize = 32 * 1024 / 8;
- final int diskCapacity = 200 / 8;
- final InstanceType type = InstanceTypeFactory.construct("dummy", numComputeUnits, numCores, memorySize,
- diskCapacity, -1);
for (int run = 0; run < 2; ++run) {
// do this twice to check that everything is correctly freed
- AllocatedSlice[] slices = new AllocatedSlice[8];
+ AllocatedResource[] allocatedResources = new AllocatedResource[8];
for (int i = 0; i < 8; ++i) {
- slices[i] = host.createSlice(type, jobID);
- assertNotNull(slices[i]);
- assertEquals(numComputeUnits, slices[i].getType().getNumberOfComputeUnits());
- assertEquals(numCores, slices[i].getType().getNumberOfCores());
- assertEquals(memorySize, slices[i].getType().getMemorySize());
- assertEquals(diskCapacity, slices[i].getType().getDiskCapacity());
+ try {
+ allocatedResources[i] = host.allocateSlot(jobID);
+ }catch (InstanceException ex){
+ fail(ex.getMessage());
+ }
+
+ assertNotNull(allocatedResources[i]);
}
+
+ boolean instanceException = false;
// now no resources should be left
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 1, 0, 0, 0, 0), jobID));
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 0, 1, 0, 0, 0), jobID));
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 0, 0, 1, 0, 0), jobID));
- assertNull(host.createSlice(InstanceTypeFactory.construct("dummy", 0, 0, 0, 1, 0), jobID));
- List<AllocatedSlice> removedSlices = host.removeAllAllocatedSlices();
-
- final Set<AllocatedSlice> slicesSet = new HashSet<AllocatedSlice>();
- for(int i = 0; i < slices.length; ++i) {
- slicesSet.add(slices[i]);
+ try {
+ host.allocateSlot(jobID);
+ } catch (InstanceException ex){
+ instanceException = true;
+ }
+
+ assertTrue(instanceException);
+ Collection<AllocatedSlot> allocatedSlots = host.removeAllocatedSlots();
+ Set<AllocationID> removedAllocationIDs = new HashSet<AllocationID>();
+
+ for(AllocatedSlot slot: allocatedSlots){
+ removedAllocationIDs.add(slot.getAllocationID());
+ }
+
+ final Set<AllocationID> allocationIDs = new HashSet<AllocationID>();
+ for(int i = 0; i < allocatedResources.length; ++i) {
+ allocationIDs.add(allocatedResources[i].getAllocationID());
}
- final Set<AllocatedSlice> removedSlicesSet = new HashSet<AllocatedSlice>(removedSlices);
-
+
//Check if both sets are equal
- assertEquals(slicesSet.size(), removedSlices.size());
- final Iterator<AllocatedSlice> it = slicesSet.iterator();
+ assertEquals(allocationIDs.size(), removedAllocationIDs.size());
+ final Iterator<AllocationID> it = allocationIDs.iterator();
while(it.hasNext()) {
- assertTrue(removedSlicesSet.remove(it.next()));
+ assertTrue(removedAllocationIDs.remove(it.next()));
}
- assertEquals(0, removedSlicesSet.size());
+ assertEquals(0, removedAllocationIDs.size());
}
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMapTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMapTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMapTest.java
deleted file mode 100644
index 974283d..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/cluster/PendingRequestsMapTest.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.instance.cluster;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Iterator;
-import java.util.Map;
-
-import org.junit.Test;
-
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
-
-/**
- * This class checks the {@link PendingRequestsMap} data structure.
- *
- */
-public class PendingRequestsMapTest {
-
- /**
- * The first instance type used in the tests.
- */
- private static final InstanceType INSTANCE_TYPE1 = InstanceTypeFactory.construct("test1", 1, 1, 2, 2, 0);
-
- /**
- * The second instance type used in the tests.
- */
- private static final InstanceType INSTANCE_TYPE2 = InstanceTypeFactory.construct("test2", 2, 2, 4, 4, 0);
-
- /**
- * Checks the correctness of the {@link PendingRequestsMap} data structure.
- */
- @Test
- public void testPendingRequestsMap() {
-
- final PendingRequestsMap prm = new PendingRequestsMap();
-
- assertFalse(prm.hasPendingRequests());
-
- prm.addRequest(INSTANCE_TYPE1, 1);
- prm.addRequest(INSTANCE_TYPE2, 2);
- prm.addRequest(INSTANCE_TYPE2, 2);
-
- assertTrue(prm.hasPendingRequests());
-
- final Iterator<Map.Entry<InstanceType, Integer>> it = prm.iterator();
- int iterationCounter = 0;
- while (it.hasNext()) {
-
- final Map.Entry<InstanceType, Integer> entry = it.next();
- ++iterationCounter;
-
- if (entry.getKey().equals(INSTANCE_TYPE1)) {
- assertEquals(1, entry.getValue().intValue());
- }
-
- if (entry.getKey().equals(INSTANCE_TYPE2)) {
- assertEquals(4, entry.getValue().intValue());
- }
- }
-
- assertEquals(2, iterationCounter);
-
- prm.decreaseNumberOfPendingInstances(INSTANCE_TYPE1);
- prm.decreaseNumberOfPendingInstances(INSTANCE_TYPE1);
- prm.decreaseNumberOfPendingInstances(INSTANCE_TYPE1); // This call is actually superfluous
-
- assertTrue(prm.hasPendingRequests());
-
- prm.decreaseNumberOfPendingInstances(INSTANCE_TYPE2);
- prm.decreaseNumberOfPendingInstances(INSTANCE_TYPE2);
- prm.decreaseNumberOfPendingInstances(INSTANCE_TYPE2);
- prm.decreaseNumberOfPendingInstances(INSTANCE_TYPE2);
-
- assertFalse(prm.hasPendingRequests());
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
index 92ea5ab..a8f1331 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/instance/local/LocalInstanceManagerTest.java
@@ -13,16 +13,15 @@
package eu.stratosphere.nephele.instance.local;
-import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
-import eu.stratosphere.nephele.ExecutionMode;
+import eu.stratosphere.nephele.instance.InstanceManager;
import junit.framework.Assert;
import org.junit.Test;
+import eu.stratosphere.nephele.ExecutionMode;
import eu.stratosphere.configuration.GlobalConfiguration;
-import eu.stratosphere.nephele.instance.InstanceType;
import eu.stratosphere.nephele.jobmanager.JobManager;
import eu.stratosphere.nephele.util.ServerTestUtils;
@@ -53,17 +52,9 @@ public class LocalInstanceManagerTest {
final TestInstanceListener testInstanceListener = new TestInstanceListener();
- LocalInstanceManager lm = (LocalInstanceManager) jm.getInstanceManager(); // this is for sure, because I chose the local strategy
+ InstanceManager im = jm.getInstanceManager();
try {
- lm.setInstanceListener(testInstanceListener);
-
- final InstanceType defaultInstanceType = lm.getDefaultInstanceType();
- assertEquals("test", defaultInstanceType.getIdentifier());
- assertEquals(4, defaultInstanceType.getNumberOfComputeUnits());
- assertEquals(4, defaultInstanceType.getNumberOfCores());
- assertEquals(1024, defaultInstanceType.getMemorySize());
- assertEquals(160, defaultInstanceType.getDiskCapacity());
- assertEquals(0, defaultInstanceType.getPricePerHour());
+ im.setInstanceListener(testInstanceListener);
} catch (Exception e) {
e.printStackTrace();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
index 063b827..fa4fbfa 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
@@ -34,11 +34,8 @@ import eu.stratosphere.nephele.util.FileLineWriter;
import eu.stratosphere.nephele.util.JarFileCreator;
import eu.stratosphere.nephele.util.ServerTestUtils;
import eu.stratosphere.util.LogUtils;
-import eu.stratosphere.util.StringUtils;
-import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -166,19 +163,23 @@ public class JobManagerITCase {
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
i1.setFileInputClass(FileLineReader.class);
i1.setFilePath(new Path(new File(testDirectory).toURI()));
+ i1.setNumberOfSubtasks(1);
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
t1.setTaskClass(ForwardTask.class);
+ t1.setNumberOfSubtasks(1);
// task vertex 2
final JobTaskVertex t2 = new JobTaskVertex("Task 2", jg);
t2.setTaskClass(ForwardTask.class);
+ t2.setNumberOfSubtasks(1);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
o1.setFileOutputClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
+ o1.setNumberOfSubtasks(1);
t1.setVertexToShareInstancesWith(i1);
t2.setVertexToShareInstancesWith(i1);
@@ -473,19 +474,23 @@ public class JobManagerITCase {
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
i1.setFileInputClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile.toURI()));
+ i1.setNumberOfSubtasks(1);
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
t1.setTaskClass(ForwardTask.class);
+ t1.setNumberOfSubtasks(1);
// task vertex 2
final JobTaskVertex t2 = new JobTaskVertex("Task 2", jg);
t2.setTaskClass(ForwardTask.class);
+ t2.setNumberOfSubtasks(1);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
o1.setFileOutputClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
+ o1.setNumberOfSubtasks(1);
t1.setVertexToShareInstancesWith(i1);
t2.setVertexToShareInstancesWith(i1);
@@ -747,6 +752,7 @@ public class JobManagerITCase {
JobFileOutputVertex o1 = new JobFileOutputVertex("Output", jg);
o1.setFileOutputClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
+ o1.setNumberOfSubtasks(1);
i1.setVertexToShareInstancesWith(o1);
i2.setVertexToShareInstancesWith(o1);
@@ -877,27 +883,23 @@ public class JobManagerITCase {
i1.setFileInputClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile1.toURI()));
i1.setNumberOfSubtasks(numberOfSubtasks);
- i1.setNumberOfSubtasksPerInstance(numberOfSubtasks);
// input vertex 2
final JobFileInputVertex i2 = new JobFileInputVertex("Input 2", jg);
i2.setFileInputClass(FileLineReader.class);
i2.setFilePath(new Path(inputFile2.toURI()));
i2.setNumberOfSubtasks(numberOfSubtasks);
- i2.setNumberOfSubtasksPerInstance(numberOfSubtasks);
// union task
final JobTaskVertex f1 = new JobTaskVertex("Forward 1", jg);
f1.setTaskClass(DoubleTargetTask.class);
f1.setNumberOfSubtasks(numberOfSubtasks);
- f1.setNumberOfSubtasksPerInstance(numberOfSubtasks);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output", jg);
o1.setFileOutputClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
o1.setNumberOfSubtasks(numberOfSubtasks);
- o1.setNumberOfSubtasksPerInstance(numberOfSubtasks);
i1.setVertexToShareInstancesWith(o1);
i2.setVertexToShareInstancesWith(o1);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
new file mode 100644
index 0000000..c8bcddc
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
@@ -0,0 +1,185 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.jobmanager.scheduler.queue;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.List;
+
+import eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler;
+
+import org.junit.Test;
+
+import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.execution.ExecutionState;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
+import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
+import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
+import eu.stratosphere.nephele.executiongraph.GraphConversionException;
+import eu.stratosphere.nephele.jobgraph.JobGraph;
+import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
+import eu.stratosphere.nephele.jobgraph.JobInputVertex;
+import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
+import eu.stratosphere.nephele.jobmanager.scheduler.SchedulingException;
+import eu.stratosphere.nephele.template.AbstractGenericInputTask;
+import eu.stratosphere.nephele.template.AbstractOutputTask;
+import eu.stratosphere.runtime.io.api.RecordReader;
+import eu.stratosphere.runtime.io.api.RecordWriter;
+import eu.stratosphere.runtime.io.channels.ChannelType;
+import eu.stratosphere.util.StringUtils;
+
+/**
+ * This class checks the functionality of the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} class
+ */
+public class DefaultSchedulerTest {
+
+ /**
+ * Test input task.
+ *
+ */
+ public static final class InputTask extends AbstractGenericInputTask {
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void registerInputOutput() {
+ new RecordWriter<StringRecord>(this);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void invoke() throws Exception {
+ // Nothing to do here
+ }
+
+ }
+
+ /**
+ * Test output task.
+ *
+ */
+ public static final class OutputTask extends AbstractOutputTask {
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void registerInputOutput() {
+ new RecordReader<StringRecord>(this, StringRecord.class);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void invoke() throws Exception {
+ // Nothing to do here
+ }
+
+ }
+
+ /**
+ * Constructs a sample execution graph consisting of two vertices connected by a channel of the given type.
+ *
+ * @param channelType
+ * the channel type to connect the vertices with
+ * @return a sample execution graph
+ */
+ private ExecutionGraph createExecutionGraph(ChannelType channelType) {
+
+ final JobGraph jobGraph = new JobGraph("Job Graph");
+
+ final JobInputVertex inputVertex = new JobInputVertex("Input 1", jobGraph);
+ inputVertex.setInputClass(InputTask.class);
+ inputVertex.setNumberOfSubtasks(1);
+
+ final JobOutputVertex outputVertex = new JobOutputVertex("Output 1", jobGraph);
+ outputVertex.setOutputClass(OutputTask.class);
+ outputVertex.setNumberOfSubtasks(1);
+
+ try {
+ inputVertex.connectTo(outputVertex, channelType);
+ } catch (JobGraphDefinitionException e) {
+ fail(StringUtils.stringifyException(e));
+ }
+
+ try {
+ LibraryCacheManager.register(jobGraph.getJobID(), new String[0]);
+ return new ExecutionGraph(jobGraph, 1);
+
+ } catch (GraphConversionException e) {
+ fail(StringUtils.stringifyException(e));
+ } catch (IOException e) {
+ fail(StringUtils.stringifyException(e));
+ }
+
+ return null;
+ }
+
+ /**
+ * Checks the behavior of the scheduleJob() method with a job consisting of two tasks connected via an in-memory
+ * channel.
+ */
+ @Test
+ public void testScheduleJobWithInMemoryChannel() {
+
+ final TestInstanceManager tim = new TestInstanceManager();
+ final TestDeploymentManager tdm = new TestDeploymentManager();
+ final DefaultScheduler scheduler = new DefaultScheduler(tdm, tim);
+
+ final ExecutionGraph executionGraph = createExecutionGraph(ChannelType.IN_MEMORY);
+
+ try {
+ try {
+ scheduler.scheduleJob(executionGraph);
+ } catch (SchedulingException e) {
+ fail(StringUtils.stringifyException(e));
+ }
+
+ // Wait for the deployment to complete
+ tdm.waitForDeployment();
+
+ assertEquals(executionGraph.getJobID(), tdm.getIDOfLastDeployedJob());
+ final List<ExecutionVertex> listOfDeployedVertices = tdm.getListOfLastDeployedVertices();
+ assertNotNull(listOfDeployedVertices);
+ // Vertices connected via in-memory channels must be deployed in a single cycle.
+ assertEquals(2, listOfDeployedVertices.size());
+
+ // Check if the release of the allocated resources works properly by simulating the vertices' life cycle
+ assertEquals(0, tim.getNumberOfReleaseMethodCalls());
+
+ // Simulate vertex life cycle
+ for (final ExecutionVertex vertex : listOfDeployedVertices) {
+ vertex.updateExecutionState(ExecutionState.STARTING);
+ vertex.updateExecutionState(ExecutionState.RUNNING);
+ vertex.updateExecutionState(ExecutionState.FINISHING);
+ vertex.updateExecutionState(ExecutionState.FINISHED);
+ }
+
+ assertEquals(1, tim.getNumberOfReleaseMethodCalls());
+ } finally {
+ try {
+ LibraryCacheManager.unregister(executionGraph.getJobID());
+ } catch (IOException ioe) {
+ // Ignore exception here
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueSchedulerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueSchedulerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueSchedulerTest.java
deleted file mode 100644
index f1e3191..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/QueueSchedulerTest.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager.scheduler.queue;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.List;
-
-import eu.stratosphere.runtime.io.api.RecordWriter;
-import org.junit.Test;
-
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.nephele.execution.ExecutionState;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.executiongraph.GraphConversionException;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.runtime.io.api.RecordReader;
-import eu.stratosphere.runtime.io.channels.ChannelType;
-import eu.stratosphere.nephele.jobgraph.JobGraph;
-import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
-import eu.stratosphere.nephele.jobgraph.JobInputVertex;
-import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
-import eu.stratosphere.nephele.jobmanager.scheduler.SchedulingException;
-import eu.stratosphere.nephele.template.AbstractGenericInputTask;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
-import eu.stratosphere.util.StringUtils;
-
-/**
- * This class checks the functionality of the {@link QueueScheduler} class
- */
-public class QueueSchedulerTest {
-
- /**
- * Test input task.
- *
- */
- public static final class InputTask extends AbstractGenericInputTask {
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void registerInputOutput() {
- new RecordWriter<StringRecord>(this);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void invoke() throws Exception {
- // Nothing to do here
- }
-
- }
-
- /**
- * Test output task.
- *
- */
- public static final class OutputTask extends AbstractOutputTask {
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void registerInputOutput() {
- new RecordReader<StringRecord>(this, StringRecord.class);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void invoke() throws Exception {
- // Nothing to do here
- }
-
- }
-
- /**
- * Constructs a sample execution graph consisting of two vertices connected by a channel of the given type.
- *
- * @param channelType
- * the channel type to connect the vertices with
- * @param instanceManager
- * the instance manager that shall be used during the creation of the execution graph
- * @return a sample execution graph
- */
- private ExecutionGraph createExecutionGraph(final ChannelType channelType, final InstanceManager instanceManager) {
-
- final JobGraph jobGraph = new JobGraph("Job Graph");
-
- final JobInputVertex inputVertex = new JobInputVertex("Input 1", jobGraph);
- inputVertex.setInputClass(InputTask.class);
- inputVertex.setNumberOfSubtasks(1);
-
- final JobOutputVertex outputVertex = new JobOutputVertex("Output 1", jobGraph);
- outputVertex.setOutputClass(OutputTask.class);
- outputVertex.setNumberOfSubtasks(1);
-
- try {
- inputVertex.connectTo(outputVertex, channelType);
- } catch (JobGraphDefinitionException e) {
- fail(StringUtils.stringifyException(e));
- }
-
- try {
- LibraryCacheManager.register(jobGraph.getJobID(), new String[0]);
- return new ExecutionGraph(jobGraph, instanceManager);
-
- } catch (GraphConversionException e) {
- fail(StringUtils.stringifyException(e));
- } catch (IOException e) {
- fail(StringUtils.stringifyException(e));
- }
-
- return null;
- }
-
- /**
- * Checks the behavior of the scheduleJob() method with a job consisting of two tasks connected via an in-memory
- * channel.
- */
- @Test
- public void testSchedulJobWithInMemoryChannel() {
-
- final TestInstanceManager tim = new TestInstanceManager();
- final TestDeploymentManager tdm = new TestDeploymentManager();
- final QueueScheduler scheduler = new QueueScheduler(tdm, tim);
-
- final ExecutionGraph executionGraph = createExecutionGraph(ChannelType.IN_MEMORY, tim);
-
- try {
- try {
- scheduler.schedulJob(executionGraph);
- } catch (SchedulingException e) {
- fail(StringUtils.stringifyException(e));
- }
-
- // Wait for the deployment to complete
- tdm.waitForDeployment();
-
- assertEquals(executionGraph.getJobID(), tdm.getIDOfLastDeployedJob());
- final List<ExecutionVertex> listOfDeployedVertices = tdm.getListOfLastDeployedVertices();
- assertNotNull(listOfDeployedVertices);
- // Vertices connected via in-memory channels must be deployed in a single cycle.
- assertEquals(2, listOfDeployedVertices.size());
-
- // Check if the release of the allocated resources works properly by simulating the vertices' life cycle
- assertEquals(0, tim.getNumberOfReleaseMethodCalls());
-
- // Simulate vertex life cycle
- for (final ExecutionVertex vertex : listOfDeployedVertices) {
- vertex.updateExecutionState(ExecutionState.STARTING);
- vertex.updateExecutionState(ExecutionState.RUNNING);
- vertex.updateExecutionState(ExecutionState.FINISHING);
- vertex.updateExecutionState(ExecutionState.FINISHED);
- }
-
- assertEquals(1, tim.getNumberOfReleaseMethodCalls());
- } finally {
- try {
- LibraryCacheManager.unregister(executionGraph.getJobID());
- } catch (IOException ioe) {
- // Ignore exception here
- }
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestDeploymentManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestDeploymentManager.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestDeploymentManager.java
index 9f3c190..a118455 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestDeploymentManager.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestDeploymentManager.java
@@ -16,7 +16,7 @@ package eu.stratosphere.nephele.jobmanager.scheduler.queue;
import java.util.List;
import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.instance.AbstractInstance;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.jobmanager.DeploymentManager;
@@ -46,7 +46,7 @@ public class TestDeploymentManager implements DeploymentManager {
@Override
- public void deploy(final JobID jobID, final AbstractInstance instance,
+ public void deploy(final JobID jobID, final Instance instance,
final List<ExecutionVertex> verticesToBeDeployed) {
this.jobID = jobID;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestInstanceManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestInstanceManager.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestInstanceManager.java
index 955d3a0..5a3977a 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestInstanceManager.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/TestInstanceManager.java
@@ -16,32 +16,18 @@ package eu.stratosphere.nephele.jobmanager.scheduler.queue;
import java.net.Inet4Address;
import java.net.UnknownHostException;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.AllocationID;
-import eu.stratosphere.nephele.instance.HardwareDescription;
-import eu.stratosphere.nephele.instance.HardwareDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.instance.InstanceTypeDescriptionFactory;
-import eu.stratosphere.nephele.instance.InstanceTypeFactory;
+import eu.stratosphere.nephele.instance.*;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.topology.NetworkNode;
import eu.stratosphere.nephele.topology.NetworkTopology;
import eu.stratosphere.util.StringUtils;
/**
- * A dummy implementation of an {@link InstanceManager} used for the {@link QueueScheduler} unit tests.
+ * A dummy implementation of an {@link eu.stratosphere.nephele.instance.InstanceManager} used for the {@link QueueScheduler} unit tests.
* <p>
* This class is thread-safe.
*
@@ -49,16 +35,6 @@ import eu.stratosphere.util.StringUtils;
public final class TestInstanceManager implements InstanceManager {
/**
- * The default instance type to be used during the tests.
- */
- private static final InstanceType INSTANCE_TYPE = InstanceTypeFactory.construct("test", 1, 1, 1024, 1024, 10);
-
- /**
- * The instances this instance manager is responsible of.
- */
- private final Map<InstanceType, InstanceTypeDescription> instanceMap = new HashMap<InstanceType, InstanceTypeDescription>();
-
- /**
* Counts the number of times the method releaseAllocatedResource is called.
*/
private volatile int numberOfReleaseCalls = 0;
@@ -74,16 +50,19 @@ public final class TestInstanceManager implements InstanceManager {
private final List<AllocatedResource> allocatedResources;
/**
- * Test implementation of {@link AbstractInstance}.
+ * The test instance
+ */
+ private final TestInstance testInstance;
+
+ /**
+ * Test implementation of {@link eu.stratosphere.nephele.instance.Instance}.
*
*/
- private static final class TestInstance extends AbstractInstance {
+ private static final class TestInstance extends Instance {
/**
* Constructs a new test instance.
*
- * @param instanceType
- * the instance type
* @param instanceConnectionInfo
* the instance connection information
* @param parentNode
@@ -92,11 +71,13 @@ public final class TestInstanceManager implements InstanceManager {
* the network topology
* @param hardwareDescription
* the hardware description
+ * @param numberSlots
+ * the number of slots available on the instance
*/
- public TestInstance(final InstanceType instanceType, final InstanceConnectionInfo instanceConnectionInfo,
+ public TestInstance(final InstanceConnectionInfo instanceConnectionInfo,
final NetworkNode parentNode, final NetworkTopology networkTopology,
- final HardwareDescription hardwareDescription) {
- super(instanceType, instanceConnectionInfo, parentNode, networkTopology, hardwareDescription);
+ final HardwareDescription hardwareDescription, int numberSlots) {
+ super(instanceConnectionInfo, parentNode, networkTopology, hardwareDescription, numberSlots);
}
}
@@ -106,15 +87,13 @@ public final class TestInstanceManager implements InstanceManager {
public TestInstanceManager() {
final HardwareDescription hd = HardwareDescriptionFactory.construct(1, 1L, 1L);
- final InstanceTypeDescription itd = InstanceTypeDescriptionFactory.construct(INSTANCE_TYPE, hd, 1);
- instanceMap.put(INSTANCE_TYPE, itd);
this.allocatedResources = new ArrayList<AllocatedResource>();
try {
final InstanceConnectionInfo ici = new InstanceConnectionInfo(Inet4Address.getLocalHost(), 1, 1);
final NetworkTopology nt = new NetworkTopology();
- final TestInstance ti = new TestInstance(INSTANCE_TYPE, ici, nt.getRootNode(), nt, hd);
- this.allocatedResources.add(new AllocatedResource(ti, INSTANCE_TYPE, new AllocationID()));
+ this.testInstance = new TestInstance(ici, nt.getRootNode(), nt, hd, 1);
+ this.allocatedResources.add(new AllocatedResource(testInstance, new AllocationID()));
} catch (UnknownHostException e) {
throw new RuntimeException(StringUtils.stringifyException(e));
}
@@ -123,18 +102,7 @@ public final class TestInstanceManager implements InstanceManager {
@Override
public void requestInstance(final JobID jobID, final Configuration conf,
- final InstanceRequestMap instanceRequestMap, final List<String> splitAffinityList) throws InstanceException {
-
- if (instanceRequestMap.size() != 1) {
- throw new InstanceException(
- "requestInstance of TestInstanceManager expected to receive request for a single instance type");
- }
-
- if (instanceRequestMap.getMinimumNumberOfInstances(INSTANCE_TYPE) != 1) {
- throw new InstanceException(
- "requestInstance of TestInstanceManager expected to receive request for one instance of type "
- + INSTANCE_TYPE.getIdentifier());
- }
+ int requiredSlots) throws InstanceException {
if (this.instanceListener == null) {
throw new InstanceException("instanceListener not registered with TestInstanceManager");
@@ -158,8 +126,7 @@ public final class TestInstanceManager implements InstanceManager {
@Override
- public void releaseAllocatedResource(final JobID jobID, final Configuration conf,
- final AllocatedResource allocatedResource) throws InstanceException {
+ public void releaseAllocatedResource(final AllocatedResource allocatedResource) throws InstanceException {
++this.numberOfReleaseCalls;
}
@@ -176,32 +143,16 @@ public final class TestInstanceManager implements InstanceManager {
@Override
- public InstanceType getSuitableInstanceType(final int minNumComputeUnits, final int minNumCPUCores,
- final int minMemorySize, final int minDiskCapacity, final int maxPricePerHour) {
- throw new IllegalStateException("getSuitableInstanceType called on TestInstanceManager");
- }
-
-
- @Override
- public void reportHeartBeat(final InstanceConnectionInfo instanceConnectionInfo,
- final HardwareDescription hardwareDescription) {
+ public void reportHeartBeat(final InstanceConnectionInfo instanceConnectionInfo) {
throw new IllegalStateException("reportHeartBeat called on TestInstanceManager");
}
-
@Override
- public InstanceType getInstanceTypeByName(final String instanceTypeName) {
- throw new IllegalStateException("getInstanceTypeByName called on TestInstanceManager");
+ public void registerTaskManager(final InstanceConnectionInfo instanceConnectionInfo,
+ final HardwareDescription hardwareDescription, int numberSlots){
+ throw new IllegalStateException("registerTaskManager called on TestInstanceManager.");
}
-
- @Override
- public InstanceType getDefaultInstanceType() {
-
- return INSTANCE_TYPE;
- }
-
-
@Override
public NetworkTopology getNetworkTopology(final JobID jobID) {
throw new IllegalStateException("getNetworkTopology called on TestInstanceManager");
@@ -214,27 +165,11 @@ public final class TestInstanceManager implements InstanceManager {
this.instanceListener = instanceListener;
}
-
- @Override
- public Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes() {
-
- return this.instanceMap;
- }
-
-
@Override
- public AbstractInstance getInstanceByName(final String name) {
+ public Instance getInstanceByName(final String name) {
throw new IllegalStateException("getInstanceByName called on TestInstanceManager");
}
-
- @Override
- public void cancelPendingRequests(final JobID jobID) {
- throw new IllegalStateException("cancelPendingRequests called on TestInstanceManager");
-
- }
-
-
@Override
public void shutdown() {
throw new IllegalStateException("shutdown called on TestInstanceManager");
@@ -244,4 +179,9 @@ public final class TestInstanceManager implements InstanceManager {
public int getNumberOfTaskTrackers() {
throw new IllegalStateException("getNumberOfTaskTrackers called on TestInstanceManager");
}
+
+ @Override
+ public int getNumberOfSlots() {
+ return this.testInstance.getNumberOfSlots();
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/managementgraph/ManagementGraphTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/managementgraph/ManagementGraphTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/managementgraph/ManagementGraphTest.java
index 630f365..f1d164a 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/managementgraph/ManagementGraphTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/managementgraph/ManagementGraphTest.java
@@ -131,7 +131,6 @@ public class ManagementGraphTest {
assertEquals(origVertex.getExecutionState(), copyVertex.getExecutionState());
assertEquals(origVertex.getIndexInGroup(), copyVertex.getIndexInGroup());
assertEquals(origVertex.getInstanceName(), copyVertex.getInstanceName());
- assertEquals(origVertex.getInstanceType(), copyVertex.getInstanceType());
assertEquals(origVertex.getNumberOfInputGates(), copyVertex.getNumberOfInputGates());
assertEquals(origVertex.getNumberOfOutputGates(), copyVertex.getNumberOfOutputGates());
@@ -248,15 +247,15 @@ public class ManagementGraphTest {
// Vertices
final ManagementVertex vertex1_1 = new ManagementVertex(groupVertex1, new ManagementVertexID(), "Host 1",
- "small", 0);
+ 0);
final ManagementVertex vertex2_1 = new ManagementVertex(groupVertex2, new ManagementVertexID(), "Host 2",
- "medium", 0);
+ 0);
final ManagementVertex vertex2_2 = new ManagementVertex(groupVertex2, new ManagementVertexID(), "Host 2",
- "medium", 1);
+ 1);
final ManagementVertex vertex3_1 = new ManagementVertex(groupVertex3, new ManagementVertexID(), "Host 2",
- "medium", 0);
+ 0);
final ManagementVertex vertex4_1 = new ManagementVertex(groupVertex4, new ManagementVertexID(), "Host 2",
- "medium", 0);
+ 0);
// Input/output gates
final ManagementGate outputGate1_1 = new ManagementGate(vertex1_1, new ManagementGateID(), 0, false);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerITCase.java
index 47de74f..02190ca 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerITCase.java
@@ -62,7 +62,7 @@ public class IOManagerITCase {
@Before
public void beforeTest() {
- memoryManager = new DefaultMemoryManager(NUMBER_OF_SEGMENTS * SEGMENT_SIZE);
+ memoryManager = new DefaultMemoryManager(NUMBER_OF_SEGMENTS * SEGMENT_SIZE, 1);
ioManager = new IOManager();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerPerformanceBenchmark.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerPerformanceBenchmark.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerPerformanceBenchmark.java
index 1f0c509..7936a95 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerPerformanceBenchmark.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerPerformanceBenchmark.java
@@ -69,7 +69,7 @@ public class IOManagerPerformanceBenchmark
@Before
public void startup()
{
- memManager = new DefaultMemoryManager(MEMORY_SIZE);
+ memManager = new DefaultMemoryManager(MEMORY_SIZE,1);
ioManager = new IOManager();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerTest.java
index a4d92f1..460d546 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/iomanager/IOManagerTest.java
@@ -44,7 +44,7 @@ public class IOManagerTest
@Before
public void beforeTest()
{
- this.memoryManager = new DefaultMemoryManager(32 * 1024 * 1024);
+ this.memoryManager = new DefaultMemoryManager(32 * 1024 * 1024, 1);
this.ioManager = new IOManager();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/memorymanager/MemorySegmentTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/memorymanager/MemorySegmentTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/memorymanager/MemorySegmentTest.java
index 0d12582..fdd6448 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/memorymanager/MemorySegmentTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/services/memorymanager/MemorySegmentTest.java
@@ -46,7 +46,7 @@ public class MemorySegmentTest {
@Before
public void setUp() throws Exception{
try {
- this.manager = new DefaultMemoryManager(MANAGED_MEMORY_SIZE, PAGE_SIZE);
+ this.manager = new DefaultMemoryManager(MANAGED_MEMORY_SIZE, 1, PAGE_SIZE);
this.segment = manager.allocatePages(new DefaultMemoryManagerTest.DummyInvokable(), 1).get(0);
this.random = new Random(RANDOM_SEED);
} catch (Exception e) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
index d6cb9b0..4202880 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/ServerTestUtils.java
@@ -24,14 +24,11 @@ import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
-import java.util.Map;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import eu.stratosphere.core.io.IOReadableWritable;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
import eu.stratosphere.nephele.jobmanager.JobManagerITCase;
import eu.stratosphere.nephele.protocols.ExtendedManagementProtocol;
@@ -55,6 +52,8 @@ public final class ServerTestUtils {
*/
private static final String ECLIPSE_PATH_EXTENSION = "/src/test/resources";
+ private static final String INTELLIJ_PATH_EXTENSION = "/stratosphere-runtime/src/test/resources";
+
/**
* Private constructor.
*/
@@ -201,6 +200,12 @@ public final class ServerTestUtils {
return configDir;
}
+ configDir = System.getProperty(USER_DIR_KEY) + INTELLIJ_PATH_EXTENSION + CORRECT_CONF_DIR;
+
+ if(new File(configDir).exists()){
+ return configDir;
+ }
+
return null;
}
@@ -217,12 +222,8 @@ public final class ServerTestUtils {
public static void waitForJobManagerToBecomeReady(final ExtendedManagementProtocol jobManager) throws IOException,
InterruptedException {
- Map<InstanceType, InstanceTypeDescription> instanceMap = jobManager.getMapOfAvailableInstanceTypes();
-
- while (instanceMap.isEmpty()) {
-
+ while (jobManager.getAvailableSlots() == 0) {
Thread.sleep(100);
- instanceMap = jobManager.getMapOfAvailableInstanceTypes();
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
index ab57a18..a28ba38 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashMatchIteratorITCase.java
@@ -98,7 +98,7 @@ public class HashMatchIteratorITCase {
this.pairRecordPairComparator = new IntPairRecordPairComparator();
this.recordPairPairComparator = new RecordIntPairPairComparator();
- this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE);
+ this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, 1);
this.ioManager = new IOManager();
}
@@ -150,7 +150,7 @@ public class HashMatchIteratorITCase {
new BuildFirstHashMatchIterator<Record, Record, Record>(
input1, input2, this.recordSerializer, this.record1Comparator,
this.recordSerializer, this.record2Comparator, this.recordPairComparator,
- this.memoryManager, ioManager, this.parentTask, MEMORY_SIZE);
+ this.memoryManager, ioManager, this.parentTask, 1.0);
iterator.open();
@@ -237,7 +237,7 @@ public class HashMatchIteratorITCase {
new BuildFirstHashMatchIterator<Record, Record, Record>(
input1, input2, this.recordSerializer, this.record1Comparator,
this.recordSerializer, this.record2Comparator, this.recordPairComparator,
- this.memoryManager, ioManager, this.parentTask, MEMORY_SIZE);
+ this.memoryManager, ioManager, this.parentTask, 1.0);
iterator.open();
@@ -286,7 +286,7 @@ public class HashMatchIteratorITCase {
new BuildSecondHashMatchIterator<Record, Record, Record>(
input1, input2, this.recordSerializer, this.record1Comparator,
this.recordSerializer, this.record2Comparator, this.recordPairComparator,
- this.memoryManager, ioManager, this.parentTask, MEMORY_SIZE);
+ this.memoryManager, ioManager, this.parentTask, 1.0);
iterator.open();
@@ -373,7 +373,7 @@ public class HashMatchIteratorITCase {
new BuildSecondHashMatchIterator<Record, Record, Record>(
input1, input2, this.recordSerializer, this.record1Comparator,
this.recordSerializer, this.record2Comparator, this.recordPairComparator,
- this.memoryManager, ioManager, this.parentTask, MEMORY_SIZE);
+ this.memoryManager, ioManager, this.parentTask, 1.0);
iterator.open();
@@ -420,7 +420,7 @@ public class HashMatchIteratorITCase {
new BuildSecondHashMatchIterator<IntPair, Record, Record>(
input1, input2, this.pairSerializer, this.pairComparator,
this.recordSerializer, this.record2Comparator, this.pairRecordPairComparator,
- this.memoryManager, this.ioManager, this.parentTask, MEMORY_SIZE);
+ this.memoryManager, this.ioManager, this.parentTask, 1.0);
iterator.open();
@@ -467,7 +467,7 @@ public class HashMatchIteratorITCase {
new BuildFirstHashMatchIterator<IntPair, Record, Record>(
input1, input2, this.pairSerializer, this.pairComparator,
this.recordSerializer, this.record2Comparator, this.recordPairPairComparator,
- this.memoryManager, this.ioManager, this.parentTask, MEMORY_SIZE);
+ this.memoryManager, this.ioManager, this.parentTask, 1.0);
iterator.open();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashTableITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashTableITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashTableITCase.java
index a845318..4a2fd7d 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashTableITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/HashTableITCase.java
@@ -91,7 +91,7 @@ public class HashTableITCase {
this.pairProbeSideComparator = new IntPairComparator();
this.pairComparator = new IntPairPairComparator();
- this.memManager = new DefaultMemoryManager(32 * 1024 * 1024);
+ this.memManager = new DefaultMemoryManager(32 * 1024 * 1024,1);
this.ioManager = new IOManager();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
index 4a4e13a..d9c8b08 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/hash/ReOpenableHashTableITCase.java
@@ -116,7 +116,7 @@ public class ReOpenableHashTableITCase {
this.recordProbeSideComparator = new RecordComparator(keyPos, keyType);
this.pactRecordComparator = new HashTableITCase.RecordPairComparatorFirstInt();
- this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, PAGE_SIZE);
+ this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE,1, PAGE_SIZE);
this.ioManager = new IOManager();
}
@@ -238,7 +238,7 @@ public class ReOpenableHashTableITCase {
new BuildFirstReOpenableHashMatchIterator<Record, Record, Record>(
buildInput, probeInput, this.recordSerializer, this.record1Comparator,
this.recordSerializer, this.record2Comparator, this.recordPairComparator,
- this.memoryManager, ioManager, this.parentTask, MEMORY_SIZE);
+ this.memoryManager, ioManager, this.parentTask, 1.0);
iterator.open();
// do first join with both inputs
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
index ffc4ae7..fbe4f5b 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/ChannelViewsTest.java
@@ -73,7 +73,7 @@ public class ChannelViewsTest
@Before
public void beforeTest() {
- this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, MEMORY_PAGE_SIZE);
+ this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, 1, MEMORY_PAGE_SIZE);
this.ioManager = new IOManager();
}
@@ -189,7 +189,7 @@ public class ChannelViewsTest
List<MemorySegment> memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelWriter writer = this.ioManager.createBlockChannelWriter(channel);
final ChannelWriterOutputView outView = new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
-
+
// write a number of pairs
final Record rec = new Record();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
@@ -197,13 +197,13 @@ public class ChannelViewsTest
rec.write(outView);
}
this.memoryManager.release(outView.close());
-
+
// create the reader input view
memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelReader reader = this.ioManager.createBlockChannelReader(channel);
final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
generator.reset();
-
+
// read and re-generate all records and compare them
try {
final Record readRec = new Record();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
index fbf363d..1809540 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/io/SpillingBufferTest.java
@@ -64,7 +64,7 @@ public class SpillingBufferTest {
@Before
public void beforeTest() {
- memoryManager = new DefaultMemoryManager(MEMORY_SIZE);
+ memoryManager = new DefaultMemoryManager(MEMORY_SIZE, 1);
ioManager = new IOManager();
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/iterative/event/EventWithAggregatorsTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/iterative/event/EventWithAggregatorsTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/iterative/event/EventWithAggregatorsTest.java
index dcda405..8204eed 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/iterative/event/EventWithAggregatorsTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/iterative/event/EventWithAggregatorsTest.java
@@ -117,6 +117,8 @@ public class EventWithAggregatorsTest {
private static class TestAggregator<T extends Value> implements Aggregator<T> {
+ private static final long serialVersionUID = 1L;
+
private final T val;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableIteratorTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableIteratorTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableIteratorTest.java
index c7b4644..49afc3a 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableIteratorTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableIteratorTest.java
@@ -50,7 +50,7 @@ public class BlockResettableIteratorTest
@Before
public void startup() {
// set up IO and memory manager
- this.memman = new DefaultMemoryManager(MEMORY_CAPACITY);
+ this.memman = new DefaultMemoryManager(MEMORY_CAPACITY, 1);
// create test objects
this.objects = new ArrayList<Record>(20000);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableMutableObjectIteratorTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableMutableObjectIteratorTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableMutableObjectIteratorTest.java
index 4b562bf..e3349fd 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableMutableObjectIteratorTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/resettable/BlockResettableMutableObjectIteratorTest.java
@@ -52,7 +52,7 @@ public class BlockResettableMutableObjectIteratorTest
@Before
public void startup() {
// set up IO and memory manager
- this.memman = new DefaultMemoryManager(MEMORY_CAPACITY);
+ this.memman = new DefaultMemoryManager(MEMORY_CAPACITY, 1);
// create test objects
this.objects = new ArrayList<Record>(20000);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
index b416b73..26ce081 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/AsynchonousPartialSorterITCase.java
@@ -72,7 +72,7 @@ public class AsynchonousPartialSorterITCase
@Before
public void beforeTest()
{
- this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE);
+ this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE,1);
this.ioManager = new IOManager();
this.serializer = RecordSerializerFactory.get();
this.comparator = new RecordComparator(new int[] {0}, new Class[] {TestData.Key.class});
@@ -107,7 +107,7 @@ public class AsynchonousPartialSorterITCase
// merge iterator
LOG.debug("Initializing sortmerger...");
Sorter<Record> sorter = new AsynchronousPartialSorter<Record>(this.memoryManager, source,
- this.parentTask, this.serializer, this.comparator, 32 * 1024 * 1024);
+ this.parentTask, this.serializer, this.comparator, 1.0);
runPartialSorter(sorter, NUM_RECORDS, 0);
}
@@ -130,7 +130,7 @@ public class AsynchonousPartialSorterITCase
// merge iterator
LOG.debug("Initializing sortmerger...");
Sorter<Record> sorter = new AsynchronousPartialSorter<Record>(this.memoryManager, source,
- this.parentTask, this.serializer, this.comparator, 32 * 1024 * 1024);
+ this.parentTask, this.serializer, this.comparator, 1.0);
runPartialSorter(sorter, NUM_RECORDS, 2);
}
@@ -153,7 +153,7 @@ public class AsynchonousPartialSorterITCase
// merge iterator
LOG.debug("Initializing sortmerger...");
Sorter<Record> sorter = new AsynchronousPartialSorter<Record>(this.memoryManager, source,
- this.parentTask, this.serializer, this.comparator, 32 * 1024 * 1024);
+ this.parentTask, this.serializer, this.comparator, 1.0);
runPartialSorter(sorter, NUM_RECORDS, 28);
}
@@ -178,7 +178,7 @@ public class AsynchonousPartialSorterITCase
// merge iterator
LOG.debug("Initializing sortmerger...");
sorter = new ExceptionThrowingAsynchronousPartialSorter<Record>(this.memoryManager, source,
- this.parentTask, this.serializer, this.comparator, 32 * 1024 * 1024);
+ this.parentTask, this.serializer, this.comparator, 1.0);
runPartialSorter(sorter, NUM_RECORDS, 0);
@@ -283,10 +283,10 @@ public class AsynchonousPartialSorterITCase
public ExceptionThrowingAsynchronousPartialSorter(MemoryManager memoryManager,
MutableObjectIterator<E> input, AbstractInvokable parentTask,
TypeSerializerFactory<E> serializer, TypeComparator<E> comparator,
- long totalMemory)
+ double memoryFraction)
throws IOException, MemoryAllocationException
{
- super(memoryManager, input, parentTask, serializer, comparator, totalMemory);
+ super(memoryManager, input, parentTask, serializer, comparator, memoryFraction);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
index 09777a3..1851480 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/CombiningUnilateralSortMergerITCase.java
@@ -85,7 +85,7 @@ public class CombiningUnilateralSortMergerITCase {
@SuppressWarnings("unchecked")
@Before
public void beforeTest() {
- this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE);
+ this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, 1);
this.ioManager = new IOManager();
this.serializerFactory = RecordSerializerFactory.get();
@@ -121,7 +121,7 @@ public class CombiningUnilateralSortMergerITCase {
Sorter<Record> merger = new CombiningUnilateralSortMerger<Record>(comb,
this.memoryManager, this.ioManager, reader, this.parentTask, this.serializerFactory, this.comparator,
- 64 * 1024 * 1024, 64, 0.7f);
+ 0.25, 64, 0.7f);
final Record rec = new Record();
rec.setField(1, new IntValue(1));
@@ -162,7 +162,7 @@ public class CombiningUnilateralSortMergerITCase {
Sorter<Record> merger = new CombiningUnilateralSortMerger<Record>(comb,
this.memoryManager, this.ioManager, reader, this.parentTask, this.serializerFactory, this.comparator,
- 3 * 1024 * 1024, 64, 0.005f);
+ 0.01, 64, 0.005f);
final Record rec = new Record();
rec.setField(1, new IntValue(1));
@@ -211,7 +211,7 @@ public class CombiningUnilateralSortMergerITCase {
Sorter<Record> merger = new CombiningUnilateralSortMerger<Record>(comb,
this.memoryManager, this.ioManager, reader, this.parentTask, this.serializerFactory, this.comparator,
- 64 * 1024 * 1024, 2, 0.7f);
+ 0.25, 2, 0.7f);
// emit data
LOG.debug("emitting data");
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
index bd68382..7ba42b9 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/ExternalSortITCase.java
@@ -76,7 +76,7 @@ public class ExternalSortITCase {
@SuppressWarnings("unchecked")
@Before
public void beforeTest() {
- this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE);
+ this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, 1);
this.ioManager = new IOManager();
this.pactRecordSerializer = RecordSerializerFactory.get();
@@ -113,7 +113,7 @@ public class ExternalSortITCase {
Sorter<Record> merger = new UnilateralSortMerger<Record>(this.memoryManager, this.ioManager,
source, this.parentTask, this.pactRecordSerializer, this.pactRecordComparator,
- 64 * 1024 * 1024, 2, 0.9f);
+ (double)64/78, 2, 0.9f);
// emit data
LOG.debug("Reading and sorting data...");
@@ -159,7 +159,7 @@ public class ExternalSortITCase {
Sorter<Record> merger = new UnilateralSortMerger<Record>(this.memoryManager, this.ioManager,
source, this.parentTask, this.pactRecordSerializer, this.pactRecordComparator,
- 64 * 1024 * 1024, 10, 2, 0.9f);
+ (double)64/78, 10, 2, 0.9f);
// emit data
LOG.debug("Reading and sorting data...");
@@ -205,7 +205,7 @@ public class ExternalSortITCase {
Sorter<Record> merger = new UnilateralSortMerger<Record>(this.memoryManager, this.ioManager,
source, this.parentTask, this.pactRecordSerializer, this.pactRecordComparator,
- 16 * 1024 * 1024, 64, 0.7f);
+ (double)16/78, 64, 0.7f);
// emit data
LOG.debug("Reading and sorting data...");
@@ -254,7 +254,7 @@ public class ExternalSortITCase {
Sorter<Record> merger = new UnilateralSortMerger<Record>(this.memoryManager, this.ioManager,
source, this.parentTask, this.pactRecordSerializer, this.pactRecordComparator,
- 64 * 1024 * 1024, 16, 0.7f);
+ (double)64/78, 16, 0.7f);
// emit data
LOG.debug("Emitting data...");
@@ -307,7 +307,7 @@ public class ExternalSortITCase {
LOG.debug("Initializing sortmerger...");
Sorter<IntPair> merger = new UnilateralSortMerger<IntPair>(this.memoryManager, this.ioManager,
- generator, this.parentTask, serializerFactory, comparator, 64 * 1024 * 1024, 4, 0.7f);
+ generator, this.parentTask, serializerFactory, comparator, (double)64/78, 4, 0.7f);
// emit data
LOG.debug("Emitting data...");
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
index cb570c4..f76b802 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/MassiveStringSortingITCase.java
@@ -80,7 +80,7 @@ public class MassiveStringSortingITCase {
BufferedReader verifyReader = null;
try {
- MemoryManager mm = new DefaultMemoryManager(1024 * 1024);
+ MemoryManager mm = new DefaultMemoryManager(1024 * 1024, 1);
IOManager ioMan = new IOManager();
TypeSerializer<String> serializer = StringSerializer.INSTANCE;
@@ -170,7 +170,7 @@ public class MassiveStringSortingITCase {
BufferedReader verifyReader = null;
try {
- MemoryManager mm = new DefaultMemoryManager(1024 * 1024);
+ MemoryManager mm = new DefaultMemoryManager(1024 * 1024, 1);
IOManager ioMan = new IOManager();
TupleTypeInfo<Tuple2<String, String[]>> typeInfo = (TupleTypeInfo<Tuple2<String, String[]>>) (TupleTypeInfo<?>) TypeInfoParser.parse("Tuple2<String, String[]>");
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
index 8437c7e..0f3f558 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/pact/runtime/sort/SortMergeMatchIteratorITCase.java
@@ -88,7 +88,7 @@ public class SortMergeMatchIteratorITCase
this.comparator2 = new RecordComparator(new int[] {0}, new Class[]{TestData.Key.class});
this.pairComparator = new RecordPairComparator(new int[] {0}, new int[] {0}, new Class[]{TestData.Key.class});
- this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE);
+ this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, 1);
this.ioManager = new IOManager();
}
[16/22] git commit: Standardized creation of input and output
channels in InputGate and OutputGate. Removed linear contains check in
InputGate for channels.
Posted by se...@apache.org.
Standardized creation of input and output channels in InputGate and OutputGate. Removed linear contains check in InputGate for channels.
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/26926433
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/26926433
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/26926433
Branch: refs/heads/master
Commit: 26926433cbb82ee1789622b6f9baf0638907a69e
Parents: e52fcf9
Author: Till Rohrmann <ti...@gmail.com>
Authored: Wed Apr 9 19:35:57 2014 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:20 2014 +0200
----------------------------------------------------------------------
.../eu/stratosphere/nephele/execution/RuntimeEnvironment.java | 6 ------
1 file changed, 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/26926433/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
index 70718a9..2416b07 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
@@ -83,12 +83,6 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
private final List<InputGate<? extends IOReadableWritable>> inputGates = new CopyOnWriteArrayList<InputGate<? extends IOReadableWritable>>();
/**
- * Queue of unbound output gate IDs which are required for deserializing an environment in the course of an RPC
- * call.
- */
- private final Queue<GateID> unboundOutputGateIDs = new ArrayDeque<GateID>();
-
- /**
* Queue of unbound input gate IDs which are required for deserializing an environment in the course of an RPC
* call.
*/
[07/22] Rework the Taskmanager to a slot based model and remove
legacy cloud code
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
index 6e25796..8a3cba4 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManager.java
@@ -16,13 +16,14 @@ package eu.stratosphere.nephele.jobmanager;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
-import java.util.Map;
+import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -32,6 +33,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import eu.stratosphere.nephele.ExecutionMode;
import eu.stratosphere.nephele.managementgraph.ManagementVertexID;
import eu.stratosphere.nephele.taskmanager.TaskKillResult;
+
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@@ -68,14 +70,11 @@ import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
import eu.stratosphere.nephele.executiongraph.GraphConversionException;
import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
import eu.stratosphere.nephele.executiongraph.JobStatusListener;
-import eu.stratosphere.nephele.instance.AbstractInstance;
import eu.stratosphere.nephele.instance.DummyInstance;
import eu.stratosphere.nephele.instance.HardwareDescription;
+import eu.stratosphere.nephele.instance.Instance;
import eu.stratosphere.nephele.instance.InstanceConnectionInfo;
import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.instance.local.LocalInstanceManager;
import eu.stratosphere.runtime.io.channels.ChannelID;
import eu.stratosphere.nephele.ipc.RPC;
import eu.stratosphere.nephele.ipc.Server;
@@ -85,7 +84,7 @@ import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.jobmanager.accumulators.AccumulatorManager;
import eu.stratosphere.nephele.jobmanager.archive.ArchiveListener;
import eu.stratosphere.nephele.jobmanager.archive.MemoryArchivist;
-import eu.stratosphere.nephele.jobmanager.scheduler.AbstractScheduler;
+import eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler;
import eu.stratosphere.nephele.jobmanager.scheduler.SchedulingException;
import eu.stratosphere.nephele.jobmanager.splitassigner.InputSplitManager;
import eu.stratosphere.nephele.jobmanager.splitassigner.InputSplitWrapper;
@@ -106,6 +105,7 @@ import eu.stratosphere.nephele.taskmanager.TaskSubmissionResult;
import eu.stratosphere.runtime.io.network.ConnectionInfoLookupResponse;
import eu.stratosphere.runtime.io.network.RemoteReceiver;
import eu.stratosphere.nephele.taskmanager.ExecutorThreadFactory;
+import eu.stratosphere.nephele.taskmanager.transferenvelope.RegisterTaskManagerResult;
import eu.stratosphere.nephele.topology.NetworkTopology;
import eu.stratosphere.nephele.types.IntegerRecord;
import eu.stratosphere.nephele.util.SerializableArrayList;
@@ -135,7 +135,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
private final InputSplitManager inputSplitManager;
- private final AbstractScheduler scheduler;
+ private final DefaultScheduler scheduler;
private AccumulatorManager accumulatorManager;
@@ -213,20 +213,11 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
LOG.info("Starting job manager in " + executionMode + " mode");
// Try to load the instance manager for the given execution mode
- // Try to load the scheduler for the given execution mode
- if (executionMode == ExecutionMode.LOCAL) {
- try {
- this.instanceManager = new LocalInstanceManager();
- } catch (Throwable t) {
- throw new Exception("Cannot instantiate local instance manager: " + t.getMessage(), t);
- }
- } else {
- final String instanceManagerClassName = JobManagerUtils.getInstanceManagerClassName(executionMode);
- LOG.info("Trying to load " + instanceManagerClassName + " as instance manager");
- this.instanceManager = JobManagerUtils.loadInstanceManager(instanceManagerClassName);
- if (this.instanceManager == null) {
- throw new Exception("Unable to load instance manager " + instanceManagerClassName);
- }
+ final String instanceManagerClassName = JobManagerUtils.getInstanceManagerClassName(executionMode);
+ LOG.info("Trying to load " + instanceManagerClassName + " as instance manager");
+ this.instanceManager = JobManagerUtils.loadInstanceManager(instanceManagerClassName);
+ if (this.instanceManager == null) {
+ throw new Exception("Unable to load instance manager " + instanceManagerClassName);
}
// Try to load the scheduler for the given execution mode
@@ -479,7 +470,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
ExecutionGraph eg;
try {
- eg = new ExecutionGraph(job, this.instanceManager);
+ eg = new ExecutionGraph(job, this.getAvailableSlots());
} catch (GraphConversionException e) {
if (e.getCause() == null) {
return new JobSubmissionResult(AbstractJobResult.ReturnCode.ERROR, StringUtils.stringifyException(e));
@@ -520,7 +511,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
}
try {
- this.scheduler.schedulJob(eg);
+ this.scheduler.scheduleJob(eg);
} catch (SchedulingException e) {
unregisterJob(eg);
JobSubmissionResult result = new JobSubmissionResult(AbstractJobResult.ReturnCode.ERROR, StringUtils.stringifyException(e));
@@ -561,10 +552,6 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
}
}
- // Cancel all pending requests for instances
- this.instanceManager.cancelPendingRequests(executionGraph.getJobID()); // getJobID is final member, no
- // synchronization necessary
-
// Remove job from input split manager
if (this.inputSplitManager != null) {
this.inputSplitManager.unregisterJob(executionGraph);
@@ -582,8 +569,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
@Override
- public void sendHeartbeat(final InstanceConnectionInfo instanceConnectionInfo,
- final HardwareDescription hardwareDescription) {
+ public void sendHeartbeat(final InstanceConnectionInfo instanceConnectionInfo) {
// Delegate call to instance manager
if (this.instanceManager != null) {
@@ -592,7 +578,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
@Override
public void run() {
- instanceManager.reportHeartBeat(instanceConnectionInfo, hardwareDescription);
+ instanceManager.reportHeartBeat(instanceConnectionInfo);
}
};
@@ -600,6 +586,25 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
}
}
+ @Override
+ public RegisterTaskManagerResult registerTaskManager(final InstanceConnectionInfo instanceConnectionInfo,
+ final HardwareDescription hardwareDescription, final IntegerRecord numberOfSlots){
+ if(this.instanceManager != null) {
+ final Runnable registerTaskManagerRunnable = new Runnable() {
+ @Override
+ public void run(){
+ instanceManager.registerTaskManager(instanceConnectionInfo, hardwareDescription,
+ numberOfSlots.getValue());
+ }
+ };
+
+ this.executorService.execute(registerTaskManagerRunnable);
+ return new RegisterTaskManagerResult(RegisterTaskManagerResult.ReturnCode.SUCCESS);
+ }
+
+ return new RegisterTaskManagerResult(RegisterTaskManagerResult.ReturnCode.FAILURE);
+ }
+
@Override
public void updateTaskExecutionState(final TaskExecutionState executionState) throws IOException {
@@ -730,9 +735,10 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
if (sourceChannelID.equals(edge.getInputChannelID())) {
// Request was sent from an input channel
+
final ExecutionVertex connectedVertex = edge.getOutputGate().getVertex();
- final AbstractInstance assignedInstance = connectedVertex.getAllocatedResource().getInstance();
+ final Instance assignedInstance = connectedVertex.getAllocatedResource().getInstance();
if (assignedInstance == null) {
LOG.error("Cannot resolve lookup: vertex found for channel ID " + edge.getOutputGateIndex()
+ " but no instance assigned");
@@ -758,6 +764,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
return ConnectionInfoLookupResponse.createReceiverFoundAndReady(edge.getOutputChannelID());
} else {
// Receiver runs on a different task manager
+
final InstanceConnectionInfo ici = assignedInstance.getInstanceConnectionInfo();
final InetSocketAddress isa = new InetSocketAddress(ici.address(), ici.dataPort());
@@ -788,7 +795,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
return ConnectionInfoLookupResponse.createReceiverNotReady();
}
- final AbstractInstance assignedInstance = targetVertex.getAllocatedResource().getInstance();
+ final Instance assignedInstance = targetVertex.getAllocatedResource().getInstance();
if (assignedInstance == null) {
LOG.error("Cannot resolve lookup: vertex found for channel ID " + edge.getInputChannelID() + " but no instance assigned");
// LOG.info("Created receiverNotReady for " + targetVertex + " in state " + executionState + " 4");
@@ -877,6 +884,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
return eventList;
}
+
@Override
public void killTask(final JobID jobID, final ManagementVertexID id) throws IOException {
@@ -909,10 +917,11 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
eg.executeCommand(runnable);
}
+
@Override
public void killInstance(final StringRecord instanceName) throws IOException {
- final AbstractInstance instance = this.instanceManager.getInstanceByName(instanceName.toString());
+ final Instance instance = this.instanceManager.getInstanceByName(instanceName.toString());
if (instance == null) {
LOG.error("Cannot find instance with name " + instanceName + " to kill it");
return;
@@ -947,16 +956,6 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
}
- public Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes() {
-
- // Delegate call to the instance manager
- if (this.instanceManager != null) {
- return this.instanceManager.getMapOfAvailableInstanceTypes();
- }
-
- return null;
- }
-
@Override
public void jobStatusHasChanged(final ExecutionGraph executionGraph, final InternalJobStatus newJobStatus,
@@ -987,7 +986,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
return;
}
- final Set<AbstractInstance> allocatedInstance = new HashSet<AbstractInstance>();
+ final Set<Instance> allocatedInstance = new HashSet<Instance>();
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(eg, true);
while (it.hasNext()) {
@@ -995,7 +994,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
final ExecutionVertex vertex = it.next();
final ExecutionState state = vertex.getExecutionState();
if (state == ExecutionState.RUNNING || state == ExecutionState.FINISHING) {
- final AbstractInstance instance = vertex.getAllocatedResource().getInstance();
+ final Instance instance = vertex.getAllocatedResource().getInstance();
if (instance instanceof DummyInstance) {
LOG.error("Found instance of type DummyInstance for vertex " + vertex.getName() + " (state "
@@ -1013,7 +1012,7 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
@Override
public void run() {
- final Iterator<AbstractInstance> it2 = allocatedInstance.iterator();
+ final Iterator<Instance> it2 = allocatedInstance.iterator();
try {
while (it2.hasNext()) {
@@ -1030,9 +1029,14 @@ public class JobManager implements DeploymentManager, ExtendedManagementProtocol
this.executorService.execute(requestRunnable);
}
+ @Override
+ public int getAvailableSlots() {
+ return getInstanceManager().getNumberOfSlots();
+ }
+
@Override
- public void deploy(final JobID jobID, final AbstractInstance instance,
+ public void deploy(final JobID jobID, final Instance instance,
final List<ExecutionVertex> verticesToBeDeployed) {
if (verticesToBeDeployed.isEmpty()) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManagerUtils.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManagerUtils.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManagerUtils.java
index 5b0b30d..45506aa 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManagerUtils.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/JobManagerUtils.java
@@ -20,12 +20,11 @@ import java.lang.reflect.InvocationTargetException;
import java.util.Properties;
import eu.stratosphere.nephele.ExecutionMode;
-
+import eu.stratosphere.nephele.instance.InstanceManager;
+import eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.jobmanager.scheduler.AbstractScheduler;
import eu.stratosphere.util.StringUtils;
/**
@@ -47,7 +46,7 @@ public class JobManagerUtils {
/**
* Tries to locate a class with given name and to
- * instantiate a {@link AbstractScheduler} object from it.
+ * instantiate a {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} object from it.
*
* @param schedulerClassName
* the name of the class to instantiate the scheduler object from
@@ -55,21 +54,21 @@ public class JobManagerUtils {
* the deployment manager which shall be passed on to the scheduler
* @param instanceManager
* the instance manager which shall be passed on to the scheduler
- * @return the {@link AbstractScheduler} object instantiated from the class with the provided name
+ * @return the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} object instantiated from the class with the provided name
*/
@SuppressWarnings("unchecked")
- static AbstractScheduler loadScheduler(final String schedulerClassName, final DeploymentManager deploymentManager,
+ static DefaultScheduler loadScheduler(final String schedulerClassName, final DeploymentManager deploymentManager,
final InstanceManager instanceManager) {
- Class<? extends AbstractScheduler> schedulerClass;
+ Class<? extends DefaultScheduler> schedulerClass;
try {
- schedulerClass = (Class<? extends AbstractScheduler>) Class.forName(schedulerClassName);
+ schedulerClass = (Class<? extends DefaultScheduler>) Class.forName(schedulerClassName);
} catch (ClassNotFoundException e) {
LOG.error("Cannot find class " + schedulerClassName + ": " + StringUtils.stringifyException(e));
return null;
}
- Constructor<? extends AbstractScheduler> constructor;
+ Constructor<? extends DefaultScheduler> constructor;
try {
@@ -83,7 +82,7 @@ public class JobManagerUtils {
return null;
}
- AbstractScheduler scheduler;
+ DefaultScheduler scheduler;
try {
scheduler = constructor.newInstance(deploymentManager, instanceManager);
@@ -110,7 +109,7 @@ public class JobManagerUtils {
*
* @param instanceManagerClassName
* the name of the class to instantiate the instance manager object from
- * @return the {@link InstanceManager} object instantiated from the class with the provided name
+ * @return the {@link eu.stratosphere.nephele.instance.InstanceManager} object instantiated from the class with the provided name
*/
@SuppressWarnings("unchecked")
static InstanceManager loadInstanceManager(final String instanceManagerClassName) {
@@ -139,53 +138,34 @@ public class JobManagerUtils {
}
/**
- * Tries to read the class name of the {@link AbstractScheduler} implementation from the global configuration which
+ * Tries to read the class name of the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} implementation from the global configuration which
* is set to be used for the provided execution mode.
*
* @param executionMode The Nephele execution mode.
- * @return the class name of the {@link AbstractScheduler} implementation to be used or <code>null</code> if no
+ * @return the class name of the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} implementation to be used or <code>null</code> if no
* implementation is configured for the given execution mode
*/
static String getSchedulerClassName(ExecutionMode executionMode) {
- switch (executionMode) {
- case LOCAL:
- return "eu.stratosphere.nephele.jobmanager.scheduler.local.LocalScheduler";
- case CLUSTER:
- return "eu.stratosphere.nephele.jobmanager.scheduler.queue.QueueScheduler";
- default:
- throw new RuntimeException("Unrecognized Execution Mode.");
- }
-// String modeClass = getClassStringForMode(executionMode);
-// String instanceManagerClassNameKey = "jobmanager.scheduler." + modeClass + ".classname";
-// String schedulerClassName = GlobalConfiguration.getString(instanceManagerClassNameKey, null);
-//
-// if (executionMode == ExecutionMode.LOCAL && schedulerClassName == null) {
-// schedulerClassName = ConfigConstants.DEFAULT_LOCAL_MODE_SCHEDULER;
-// }
-// return schedulerClassName;
+ return "eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler";
}
/**
- * Tries to read the class name of the {@link InstanceManager} implementation from the global configuration which is
+ * Tries to read the class name of the {@link eu.stratosphere.nephele.instance.InstanceManager} implementation from the global configuration which is
* set to be used for the provided execution mode.
*
* @param executionMode The Nephele execution mode.
- * @return the class name of the {@link InstanceManager} implementation to be used or <code>null</code> if no
+ * @return the class name of the {@link eu.stratosphere.nephele.instance.InstanceManager} implementation to be used or <code>null</code> if no
* implementation is configured for the given execution mode
*/
static String getInstanceManagerClassName(ExecutionMode executionMode) {
switch (executionMode) {
case LOCAL:
- return "eu.stratosphere.nephele.instance.local.LocalInstanceManager";
+ return "eu.stratosphere.nephele.instance.LocalInstanceManager";
case CLUSTER:
- return "eu.stratosphere.nephele.instance.cluster.ClusterManager";
+ return "eu.stratosphere.nephele.instance.DefaultInstanceManager";
default:
throw new RuntimeException("Unrecognized Execution Mode.");
}
-//
-// final String modeClass = getClassStringForMode(executionMode);
-// final String instanceManagerClassNameKey = "jobmanager.instancemanager." + modeClass + ".classname";
-// return GlobalConfiguration.getString(instanceManagerClassNameKey, null);
}
/**
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractExecutionListener.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractExecutionListener.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractExecutionListener.java
deleted file mode 100644
index 5b528c7..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractExecutionListener.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager.scheduler;
-
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-
-import eu.stratosphere.nephele.execution.ExecutionListener;
-import eu.stratosphere.nephele.execution.ExecutionState;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
-import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
-import eu.stratosphere.nephele.executiongraph.ExecutionPipeline;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
-import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
-import eu.stratosphere.nephele.jobgraph.JobID;
-
-public abstract class AbstractExecutionListener implements ExecutionListener {
-
- /**
- * The instance of the {@link LocalScheduler}.
- */
- private final AbstractScheduler scheduler;
-
- /**
- * The {@link ExecutionVertex} this wrapper object belongs to.
- */
- private final ExecutionVertex executionVertex;
-
- /**
- * Constructs a new wrapper object for the given {@link ExecutionVertex}.
- *
- * @param AbstractScheduler
- * the instance of the {@link AbstractScheduler}
- * @param executionVertex
- * the {@link ExecutionVertex} the received notification refer to
- */
- public AbstractExecutionListener(final AbstractScheduler scheduler, final ExecutionVertex executionVertex) {
- this.scheduler = scheduler;
- this.executionVertex = executionVertex;
- }
-
-
- @Override
- public void executionStateChanged(final JobID jobID, final ExecutionVertexID vertexID,
- final ExecutionState newExecutionState, final String optionalMessage) {
-
- final ExecutionGraph eg = this.executionVertex.getExecutionGraph();
-
- // Check if we can deploy a new pipeline.
- if (newExecutionState == ExecutionState.FINISHING) {
-
- final ExecutionPipeline pipeline = this.executionVertex.getExecutionPipeline();
- if (!pipeline.isFinishing()) {
- // Some tasks of the pipeline are still running
- return;
- }
-
- // Find another vertex in the group which is still in SCHEDULED state and get its pipeline.
- final ExecutionGroupVertex groupVertex = this.executionVertex.getGroupVertex();
- for (int i = 0; i < groupVertex.getCurrentNumberOfGroupMembers(); ++i) {
- final ExecutionVertex groupMember = groupVertex.getGroupMember(i);
- if (groupMember.compareAndUpdateExecutionState(ExecutionState.SCHEDULED, ExecutionState.ASSIGNED)) {
-
- final ExecutionPipeline pipelineToBeDeployed = groupMember.getExecutionPipeline();
- pipelineToBeDeployed.setAllocatedResource(this.executionVertex.getAllocatedResource());
- pipelineToBeDeployed.updateExecutionState(ExecutionState.ASSIGNED);
-
- this.scheduler.deployAssignedPipeline(pipelineToBeDeployed);
- return;
- }
- }
- }
-
- if (newExecutionState == ExecutionState.CANCELED || newExecutionState == ExecutionState.FINISHED) {
-
- synchronized (eg) {
-
- if (this.scheduler.getVerticesToBeRestarted().remove(this.executionVertex.getID()) != null) {
-
- if (eg.getJobStatus() == InternalJobStatus.FAILING) {
- return;
- }
-
- this.executionVertex.updateExecutionState(ExecutionState.ASSIGNED, "Restart as part of recovery");
-
- // Run through the deployment procedure
- this.scheduler.deployAssignedVertices(this.executionVertex);
- return;
- }
- }
- }
-
- if (newExecutionState == ExecutionState.FINISHED || newExecutionState == ExecutionState.CANCELED
- || newExecutionState == ExecutionState.FAILED) {
- // Check if instance can be released
- this.scheduler.checkAndReleaseAllocatedResource(eg, this.executionVertex.getAllocatedResource());
- }
-
- // In case of an error, check if the vertex shall be recovered
- if (newExecutionState == ExecutionState.FAILED) {
- if (this.executionVertex.decrementRetriesLeftAndCheck()) {
-
- final Set<ExecutionVertex> assignedVertices = new HashSet<ExecutionVertex>();
-
- if (RecoveryLogic.recover(this.executionVertex, this.scheduler.getVerticesToBeRestarted(),
- assignedVertices)) {
-
- if (RecoveryLogic.hasInstanceAssigned(this.executionVertex)) {
- // Run through the deployment procedure
- this.scheduler.deployAssignedVertices(assignedVertices);
- }
-
- } else {
-
- // Make sure the map with the vertices to be restarted is cleaned up properly
- synchronized (eg) {
-
- final Iterator<ExecutionVertex> it = this.scheduler.getVerticesToBeRestarted().values()
- .iterator();
-
- while (it.hasNext()) {
- if (eg.equals(it.next().getExecutionGraph())) {
- it.remove();
- }
- }
- }
-
- // Actual cancellation of job is performed by job manager
- }
- }
- }
-
- }
-
-
- @Override
- public void userThreadFinished(final JobID jobID, final ExecutionVertexID vertexID, final Thread userThread) {
- // Nothing to do here
- }
-
-
- @Override
- public void userThreadStarted(final JobID jobID, final ExecutionVertexID vertexID, final Thread userThread) {
- // Nothing to do here
- }
-
-
- @Override
- public int getPriority() {
-
- return 0;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractScheduler.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractScheduler.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractScheduler.java
deleted file mode 100644
index 24e2970..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/AbstractScheduler.java
+++ /dev/null
@@ -1,662 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager.scheduler;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import eu.stratosphere.nephele.execution.ExecutionState;
-import eu.stratosphere.nephele.executiongraph.ExecutionEdge;
-import eu.stratosphere.nephele.executiongraph.ExecutionGate;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
-import eu.stratosphere.nephele.executiongraph.ExecutionGraphIterator;
-import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
-import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertexIterator;
-import eu.stratosphere.nephele.executiongraph.ExecutionPipeline;
-import eu.stratosphere.nephele.executiongraph.ExecutionStage;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
-import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
-import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
-import eu.stratosphere.nephele.instance.AbstractInstance;
-import eu.stratosphere.nephele.instance.AllocatedResource;
-import eu.stratosphere.nephele.instance.AllocationID;
-import eu.stratosphere.nephele.instance.DummyInstance;
-import eu.stratosphere.nephele.instance.InstanceException;
-import eu.stratosphere.nephele.instance.InstanceListener;
-import eu.stratosphere.nephele.instance.InstanceManager;
-import eu.stratosphere.nephele.instance.InstanceRequestMap;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.jobgraph.JobID;
-import eu.stratosphere.nephele.jobmanager.DeploymentManager;
-import eu.stratosphere.util.StringUtils;
-
-/**
- * This abstract scheduler must be extended by a scheduler implementations for Nephele. The abstract class defines the
- * fundamental methods for scheduling and removing jobs. While Nephele's
- * {@link eu.stratosphere.nephele.jobmanager.JobManager} is responsible for requesting the required instances for the
- * job at the {@link eu.stratosphere.nephele.instance.InstanceManager}, the scheduler is in charge of assigning the
- * individual tasks to the instances.
- *
- */
-public abstract class AbstractScheduler implements InstanceListener {
-
- /**
- * The LOG object to report events within the scheduler.
- */
- protected static final Log LOG = LogFactory.getLog(AbstractScheduler.class);
-
- /**
- * The instance manager assigned to this scheduler.
- */
- private final InstanceManager instanceManager;
-
- /**
- * The deployment manager assigned to this scheduler.
- */
- private final DeploymentManager deploymentManager;
-
- /**
- * Stores the vertices to be restarted once they have switched to the <code>CANCELED</code> state.
- */
- private final Map<ExecutionVertexID, ExecutionVertex> verticesToBeRestarted = new ConcurrentHashMap<ExecutionVertexID, ExecutionVertex>();
-
- /**
- * Constructs a new abstract scheduler.
- *
- * @param deploymentManager
- * the deployment manager assigned to this scheduler
- * @param instanceManager
- * the instance manager to be used with this scheduler
- */
- protected AbstractScheduler(final DeploymentManager deploymentManager, final InstanceManager instanceManager) {
-
- this.deploymentManager = deploymentManager;
- this.instanceManager = instanceManager;
- this.instanceManager.setInstanceListener(this);
- }
-
- /**
- * Adds a job represented by an {@link ExecutionGraph} object to the scheduler. The job is then executed according
- * to the strategies of the concrete scheduler implementation.
- *
- * @param executionGraph
- * the job to be added to the scheduler
- * @throws SchedulingException
- * thrown if an error occurs and the scheduler does not accept the new job
- */
- public abstract void schedulJob(ExecutionGraph executionGraph) throws SchedulingException;
-
- /**
- * Returns the execution graph which is associated with the given job ID.
- *
- * @param jobID
- * the job ID to search the execution graph for
- * @return the execution graph which belongs to the given job ID or <code>null</code if no such execution graph
- * exists
- */
- public abstract ExecutionGraph getExecutionGraphByID(JobID jobID);
-
- /**
- * Returns the {@link InstanceManager} object which is used by the current scheduler.
- *
- * @return the {@link InstanceManager} object which is used by the current scheduler
- */
- public InstanceManager getInstanceManager() {
- return this.instanceManager;
- }
-
- // void removeJob(JobID jobID);
-
- /**
- * Shuts the scheduler down. After shut down no jobs can be added to the scheduler.
- */
- public abstract void shutdown();
-
- /**
- * Collects the instances required to run the job from the given {@link ExecutionStage} and requests them at the
- * loaded instance manager.
- *
- * @param executionStage
- * the execution stage to collect the required instances from
- * @throws InstanceException
- * thrown if the given execution graph is already processing its final stage
- */
- protected void requestInstances(final ExecutionStage executionStage) throws InstanceException {
-
- final ExecutionGraph executionGraph = executionStage.getExecutionGraph();
- final InstanceRequestMap instanceRequestMap = new InstanceRequestMap();
-
- synchronized (executionStage) {
-
- executionStage.collectRequiredInstanceTypes(instanceRequestMap, ExecutionState.CREATED);
-
- final Iterator<Map.Entry<InstanceType, Integer>> it = instanceRequestMap.getMinimumIterator();
- LOG.info("Requesting the following instances for job " + executionGraph.getJobID());
- while (it.hasNext()) {
- final Map.Entry<InstanceType, Integer> entry = it.next();
- LOG.info(" " + entry.getKey() + " [" + entry.getValue().intValue() + ", "
- + instanceRequestMap.getMaximumNumberOfInstances(entry.getKey()) + "]");
- }
-
- if (instanceRequestMap.isEmpty()) {
- return;
- }
-
- this.instanceManager.requestInstance(executionGraph.getJobID(), executionGraph.getJobConfiguration(),
- instanceRequestMap, null);
-
- // Switch vertex state to assigning
- final ExecutionGraphIterator it2 = new ExecutionGraphIterator(executionGraph, executionGraph
- .getIndexOfCurrentExecutionStage(), true, true);
- while (it2.hasNext()) {
-
- it2.next().compareAndUpdateExecutionState(ExecutionState.CREATED, ExecutionState.SCHEDULED);
- }
- }
- }
-
- void findVerticesToBeDeployed(final ExecutionVertex vertex,
- final Map<AbstractInstance, List<ExecutionVertex>> verticesToBeDeployed,
- final Set<ExecutionVertex> alreadyVisited) {
-
- if (!alreadyVisited.add(vertex)) {
- return;
- }
-
- if (vertex.compareAndUpdateExecutionState(ExecutionState.ASSIGNED, ExecutionState.READY)) {
- final AbstractInstance instance = vertex.getAllocatedResource().getInstance();
-
- if (instance instanceof DummyInstance) {
- LOG.error("Inconsistency: Vertex " + vertex + " is about to be deployed on a DummyInstance");
- }
-
- List<ExecutionVertex> verticesForInstance = verticesToBeDeployed.get(instance);
- if (verticesForInstance == null) {
- verticesForInstance = new ArrayList<ExecutionVertex>();
- verticesToBeDeployed.put(instance, verticesForInstance);
- }
-
- verticesForInstance.add(vertex);
- }
-
- final int numberOfOutputGates = vertex.getNumberOfOutputGates();
- for (int i = 0; i < numberOfOutputGates; ++i) {
-
- final ExecutionGate outputGate = vertex.getOutputGate(i);
- boolean deployTarget;
-
- switch (outputGate.getChannelType()) {
- case NETWORK:
- deployTarget = false;
- break;
- case IN_MEMORY:
- deployTarget = true;
- break;
- default:
- throw new IllegalStateException("Unknown channel type");
- }
-
- if (deployTarget) {
-
- final int numberOfOutputChannels = outputGate.getNumberOfEdges();
- for (int j = 0; j < numberOfOutputChannels; ++j) {
- final ExecutionEdge outputChannel = outputGate.getEdge(j);
- final ExecutionVertex connectedVertex = outputChannel.getInputGate().getVertex();
- findVerticesToBeDeployed(connectedVertex, verticesToBeDeployed, alreadyVisited);
- }
- }
- }
- }
-
- /**
- * Collects all execution vertices with the state ASSIGNED starting from the given start vertex and
- * deploys them on the assigned {@link AllocatedResource} objects.
- *
- * @param startVertex
- * the execution vertex to start the deployment from
- */
- public void deployAssignedVertices(final ExecutionVertex startVertex) {
-
- final JobID jobID = startVertex.getExecutionGraph().getJobID();
-
- final Map<AbstractInstance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<AbstractInstance, List<ExecutionVertex>>();
- final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
-
- findVerticesToBeDeployed(startVertex, verticesToBeDeployed, alreadyVisited);
-
- if (!verticesToBeDeployed.isEmpty()) {
-
- final Iterator<Map.Entry<AbstractInstance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
- .entrySet()
- .iterator();
-
- while (it2.hasNext()) {
-
- final Map.Entry<AbstractInstance, List<ExecutionVertex>> entry = it2.next();
- this.deploymentManager.deploy(jobID, entry.getKey(), entry.getValue());
- }
- }
- }
-
- /**
- * Collects all execution vertices with the state ASSIGNED from the given pipeline and deploys them on the assigned
- * {@link AllocatedResource} objects.
- *
- * @param pipeline
- * the execution pipeline to be deployed
- */
- public void deployAssignedPipeline(final ExecutionPipeline pipeline) {
-
- final JobID jobID = null;
-
- final Map<AbstractInstance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<AbstractInstance, List<ExecutionVertex>>();
- final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
-
- final Iterator<ExecutionVertex> it = pipeline.iterator();
- while (it.hasNext()) {
- findVerticesToBeDeployed(it.next(), verticesToBeDeployed, alreadyVisited);
- }
-
- if (!verticesToBeDeployed.isEmpty()) {
-
- final Iterator<Map.Entry<AbstractInstance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
- .entrySet()
- .iterator();
-
- while (it2.hasNext()) {
-
- final Map.Entry<AbstractInstance, List<ExecutionVertex>> entry = it2.next();
- this.deploymentManager.deploy(jobID, entry.getKey(), entry.getValue());
- }
- }
- }
-
- /**
- * Collects all execution vertices with the state ASSIGNED starting from the given collection of start vertices and
- * deploys them on the assigned {@link AllocatedResource} objects.
- *
- * @param startVertices
- * the collection of execution vertices to start the deployment from
- */
- public void deployAssignedVertices(final Collection<ExecutionVertex> startVertices) {
-
- JobID jobID = null;
-
- final Map<AbstractInstance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<AbstractInstance, List<ExecutionVertex>>();
- final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
-
- for (final ExecutionVertex startVertex : startVertices) {
-
- if (jobID == null) {
- jobID = startVertex.getExecutionGraph().getJobID();
- }
-
- findVerticesToBeDeployed(startVertex, verticesToBeDeployed, alreadyVisited);
- }
-
- if (!verticesToBeDeployed.isEmpty()) {
-
- final Iterator<Map.Entry<AbstractInstance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
- .entrySet()
- .iterator();
-
- while (it2.hasNext()) {
-
- final Map.Entry<AbstractInstance, List<ExecutionVertex>> entry = it2.next();
- this.deploymentManager.deploy(jobID, entry.getKey(), entry.getValue());
- }
- }
- }
-
- /**
- * Collects all execution vertices with the state ASSIGNED starting from the input vertices of the current execution
- * stage and deploys them on the assigned {@link AllocatedResource} objects.
- *
- * @param executionGraph
- * the execution graph to collect the vertices from
- */
- public void deployAssignedInputVertices(final ExecutionGraph executionGraph) {
-
- final Map<AbstractInstance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<AbstractInstance, List<ExecutionVertex>>();
- final ExecutionStage executionStage = executionGraph.getCurrentExecutionStage();
-
- final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
-
- for (int i = 0; i < executionStage.getNumberOfStageMembers(); ++i) {
-
- final ExecutionGroupVertex startVertex = executionStage.getStageMember(i);
- if (!startVertex.isInputVertex()) {
- continue;
- }
-
- for (int j = 0; j < startVertex.getCurrentNumberOfGroupMembers(); ++j) {
- final ExecutionVertex vertex = startVertex.getGroupMember(j);
- findVerticesToBeDeployed(vertex, verticesToBeDeployed, alreadyVisited);
- }
- }
-
- if (!verticesToBeDeployed.isEmpty()) {
-
- final Iterator<Map.Entry<AbstractInstance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
- .entrySet()
- .iterator();
-
- while (it2.hasNext()) {
-
- final Map.Entry<AbstractInstance, List<ExecutionVertex>> entry = it2.next();
- this.deploymentManager.deploy(executionGraph.getJobID(), entry.getKey(), entry.getValue());
- }
- }
- }
-
-
- @Override
- public void resourcesAllocated(final JobID jobID, final List<AllocatedResource> allocatedResources) {
-
- if (allocatedResources == null) {
- LOG.error("Resource to lock is null!");
- return;
- }
-
- for (final AllocatedResource allocatedResource : allocatedResources) {
- if (allocatedResource.getInstance() instanceof DummyInstance) {
- LOG.debug("Available instance is of type DummyInstance!");
- return;
- }
- }
-
- final ExecutionGraph eg = getExecutionGraphByID(jobID);
-
- if (eg == null) {
- /*
- * The job have have been canceled in the meantime, in this case
- * we release the instance immediately.
- */
- try {
- for (final AllocatedResource allocatedResource : allocatedResources) {
- getInstanceManager().releaseAllocatedResource(jobID, null, allocatedResource);
- }
- } catch (InstanceException e) {
- LOG.error(e);
- }
- return;
- }
-
- final Runnable command = new Runnable() {
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void run() {
-
- final ExecutionStage stage = eg.getCurrentExecutionStage();
-
- synchronized (stage) {
-
- for (final AllocatedResource allocatedResource : allocatedResources) {
-
- AllocatedResource resourceToBeReplaced = null;
- // Important: only look for instances to be replaced in the current stage
- final Iterator<ExecutionGroupVertex> groupIterator = new ExecutionGroupVertexIterator(eg, true,
- stage.getStageNumber());
- while (groupIterator.hasNext()) {
-
- final ExecutionGroupVertex groupVertex = groupIterator.next();
- for (int i = 0; i < groupVertex.getCurrentNumberOfGroupMembers(); ++i) {
-
- final ExecutionVertex vertex = groupVertex.getGroupMember(i);
-
- if (vertex.getExecutionState() == ExecutionState.SCHEDULED
- && vertex.getAllocatedResource() != null) {
- // In local mode, we do not consider any topology, only the instance type
- if (vertex.getAllocatedResource().getInstanceType().equals(
- allocatedResource.getInstanceType())) {
- resourceToBeReplaced = vertex.getAllocatedResource();
- break;
- }
- }
- }
-
- if (resourceToBeReplaced != null) {
- break;
- }
- }
-
- // For some reason, we don't need this instance
- if (resourceToBeReplaced == null) {
- LOG.error("Instance " + allocatedResource.getInstance() + " is not required for job"
- + eg.getJobID());
- try {
- getInstanceManager().releaseAllocatedResource(jobID, eg.getJobConfiguration(),
- allocatedResource);
- } catch (InstanceException e) {
- LOG.error(e);
- }
- return;
- }
-
- // Replace the selected instance
- final Iterator<ExecutionVertex> it = resourceToBeReplaced.assignedVertices();
- while (it.hasNext()) {
- final ExecutionVertex vertex = it.next();
- vertex.setAllocatedResource(allocatedResource);
- vertex.updateExecutionState(ExecutionState.ASSIGNED);
- }
- }
- }
-
- // Deploy the assigned vertices
- deployAssignedInputVertices(eg);
-
- }
-
- };
-
- eg.executeCommand(command);
- }
-
- /**
- * Checks if the given {@link AllocatedResource} is still required for the
- * execution of the given execution graph. If the resource is no longer
- * assigned to a vertex that is either currently running or about to run
- * the given resource is returned to the instance manager for deallocation.
- *
- * @param executionGraph
- * the execution graph the provided resource has been used for so far
- * @param allocatedResource
- * the allocated resource to check the assignment for
- */
- public void checkAndReleaseAllocatedResource(final ExecutionGraph executionGraph,
- final AllocatedResource allocatedResource) {
-
- if (allocatedResource == null) {
- LOG.error("Resource to lock is null!");
- return;
- }
-
- if (allocatedResource.getInstance() instanceof DummyInstance) {
- LOG.debug("Available instance is of type DummyInstance!");
- return;
- }
-
- boolean resourceCanBeReleased = true;
- final Iterator<ExecutionVertex> it = allocatedResource.assignedVertices();
- while (it.hasNext()) {
- final ExecutionVertex vertex = it.next();
- final ExecutionState state = vertex.getExecutionState();
-
- if (state != ExecutionState.CREATED && state != ExecutionState.FINISHED
- && state != ExecutionState.FAILED && state != ExecutionState.CANCELED) {
-
- resourceCanBeReleased = false;
- break;
- }
- }
-
- if (resourceCanBeReleased) {
-
- LOG.info("Releasing instance " + allocatedResource.getInstance());
- try {
- getInstanceManager().releaseAllocatedResource(executionGraph.getJobID(), executionGraph
- .getJobConfiguration(), allocatedResource);
- } catch (InstanceException e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- }
- }
-
- DeploymentManager getDeploymentManager() {
- return this.deploymentManager;
- }
-
- protected void replayCheckpointsFromPreviousStage(final ExecutionGraph executionGraph) {
-
- final int currentStageIndex = executionGraph.getIndexOfCurrentExecutionStage();
- final ExecutionStage previousStage = executionGraph.getStage(currentStageIndex - 1);
-
- final List<ExecutionVertex> verticesToBeReplayed = new ArrayList<ExecutionVertex>();
-
- for (int i = 0; i < previousStage.getNumberOfOutputExecutionVertices(); ++i) {
-
- final ExecutionVertex vertex = previousStage.getOutputExecutionVertex(i);
- vertex.updateExecutionState(ExecutionState.ASSIGNED);
- verticesToBeReplayed.add(vertex);
- }
-
- deployAssignedVertices(verticesToBeReplayed);
- }
-
- /**
- * Returns a map of vertices to be restarted once they have switched to their <code>CANCELED</code> state.
- *
- * @return the map of vertices to be restarted
- */
- Map<ExecutionVertexID, ExecutionVertex> getVerticesToBeRestarted() {
-
- return this.verticesToBeRestarted;
- }
-
-
- @Override
- public void allocatedResourcesDied(final JobID jobID, final List<AllocatedResource> allocatedResources) {
-
- final ExecutionGraph eg = getExecutionGraphByID(jobID);
-
- if (eg == null) {
- LOG.error("Cannot find execution graph for job with ID " + jobID);
- return;
- }
-
- final Runnable command = new Runnable() {
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void run() {
-
- synchronized (eg) {
-
- for (final AllocatedResource allocatedResource : allocatedResources) {
-
- LOG.info("Resource " + allocatedResource.getInstance().getName() + " for Job " + jobID
- + " died.");
-
- final ExecutionGraph executionGraph = getExecutionGraphByID(jobID);
-
- if (executionGraph == null) {
- LOG.error("Cannot find execution graph for job " + jobID);
- return;
- }
-
- Iterator<ExecutionVertex> vertexIter = allocatedResource.assignedVertices();
-
- // Assign vertices back to a dummy resource.
- final DummyInstance dummyInstance = DummyInstance.createDummyInstance(allocatedResource
- .getInstance()
- .getType());
- final AllocatedResource dummyResource = new AllocatedResource(dummyInstance,
- allocatedResource.getInstanceType(), new AllocationID());
-
- while (vertexIter.hasNext()) {
- final ExecutionVertex vertex = vertexIter.next();
- vertex.setAllocatedResource(dummyResource);
- }
-
- final String failureMessage = allocatedResource.getInstance().getName() + " died";
-
- vertexIter = allocatedResource.assignedVertices();
-
- while (vertexIter.hasNext()) {
- final ExecutionVertex vertex = vertexIter.next();
- final ExecutionState state = vertex.getExecutionState();
-
- switch (state) {
- case ASSIGNED:
- case READY:
- case STARTING:
- case RUNNING:
- case FINISHING:
-
- vertex.updateExecutionState(ExecutionState.FAILED, failureMessage);
-
- break;
- default:
- }
- }
-
- // TODO: Fix this
- /*
- * try {
- * requestInstances(this.executionVertex.getGroupVertex().getExecutionStage());
- * } catch (InstanceException e) {
- * e.printStackTrace();
- * // TODO: Cancel the entire job in this case
- * }
- */
- }
- }
-
- final InternalJobStatus js = eg.getJobStatus();
- if (js != InternalJobStatus.FAILING && js != InternalJobStatus.FAILED) {
-
- // TODO: Fix this
- // deployAssignedVertices(eg);
-
- final ExecutionStage stage = eg.getCurrentExecutionStage();
-
- try {
- requestInstances(stage);
- } catch (InstanceException e) {
- e.printStackTrace();
- // TODO: Cancel the entire job in this case
- }
- }
- }
- };
-
- eg.executeCommand(command);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultExecutionListener.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultExecutionListener.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultExecutionListener.java
new file mode 100644
index 0000000..86b3c40
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultExecutionListener.java
@@ -0,0 +1,127 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.jobmanager.scheduler;
+
+import eu.stratosphere.nephele.execution.ExecutionListener;
+import eu.stratosphere.nephele.execution.ExecutionState;
+import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
+import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
+import eu.stratosphere.nephele.executiongraph.ExecutionPipeline;
+import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
+import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
+import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
+import eu.stratosphere.nephele.jobgraph.JobID;
+
+public class DefaultExecutionListener implements ExecutionListener {
+
+ /**
+ * The instance of the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler}.
+ */
+ private final DefaultScheduler scheduler;
+
+ /**
+ * The {@link ExecutionVertex} this wrapper object belongs to.
+ */
+ private final ExecutionVertex executionVertex;
+
+ /**
+ * Constructs a new wrapper object for the given {@link ExecutionVertex}.
+ *
+ * @param scheduler
+ * the instance of the {@link DefaultScheduler}
+ * @param executionVertex
+ * the {@link ExecutionVertex} the received notifications refer to
+ */
+ public DefaultExecutionListener(final DefaultScheduler scheduler, final ExecutionVertex executionVertex) {
+ this.scheduler = scheduler;
+ this.executionVertex = executionVertex;
+ }
+
+
+ @Override
+ public void executionStateChanged(final JobID jobID, final ExecutionVertexID vertexID,
+ final ExecutionState newExecutionState, final String optionalMessage) {
+
+ final ExecutionGraph eg = this.executionVertex.getExecutionGraph();
+
+ // Check if we can deploy a new pipeline.
+ if (newExecutionState == ExecutionState.FINISHING) {
+
+ final ExecutionPipeline pipeline = this.executionVertex.getExecutionPipeline();
+ if (!pipeline.isFinishing()) {
+ // Some tasks of the pipeline are still running
+ return;
+ }
+
+ // Find another vertex in the group which is still in SCHEDULED state and get its pipeline.
+ final ExecutionGroupVertex groupVertex = this.executionVertex.getGroupVertex();
+ for (int i = 0; i < groupVertex.getCurrentNumberOfGroupMembers(); ++i) {
+ final ExecutionVertex groupMember = groupVertex.getGroupMember(i);
+ if (groupMember.compareAndUpdateExecutionState(ExecutionState.SCHEDULED, ExecutionState.ASSIGNED)) {
+
+ final ExecutionPipeline pipelineToBeDeployed = groupMember.getExecutionPipeline();
+ pipelineToBeDeployed.setAllocatedResource(this.executionVertex.getAllocatedResource());
+ pipelineToBeDeployed.updateExecutionState(ExecutionState.ASSIGNED);
+
+ this.scheduler.deployAssignedPipeline(pipelineToBeDeployed);
+ return;
+ }
+ }
+ }
+
+ if (newExecutionState == ExecutionState.CANCELED || newExecutionState == ExecutionState.FINISHED) {
+
+ synchronized (eg) {
+
+ if (this.scheduler.getVerticesToBeRestarted().remove(this.executionVertex.getID()) != null) {
+
+ if (eg.getJobStatus() == InternalJobStatus.FAILING) {
+ return;
+ }
+
+ this.executionVertex.updateExecutionState(ExecutionState.ASSIGNED, "Restart as part of recovery");
+
+ // Run through the deployment procedure
+ this.scheduler.deployAssignedVertices(this.executionVertex);
+ return;
+ }
+ }
+ }
+
+ if (newExecutionState == ExecutionState.FINISHED || newExecutionState == ExecutionState.CANCELED
+ || newExecutionState == ExecutionState.FAILED) {
+ // Check if instance can be released
+ this.scheduler.checkAndReleaseAllocatedResource(eg, this.executionVertex.getAllocatedResource());
+ }
+ }
+
+
+ @Override
+ public void userThreadFinished(final JobID jobID, final ExecutionVertexID vertexID, final Thread userThread) {
+ // Nothing to do here
+ }
+
+
+ @Override
+ public void userThreadStarted(final JobID jobID, final ExecutionVertexID vertexID, final Thread userThread) {
+ // Nothing to do here
+ }
+
+
+ @Override
+ public int getPriority() {
+
+ return 0;
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultScheduler.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultScheduler.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultScheduler.java
new file mode 100644
index 0000000..745b199
--- /dev/null
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/scheduler/DefaultScheduler.java
@@ -0,0 +1,762 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.jobmanager.scheduler;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.Deque;
+import java.util.ArrayDeque;
+
+import eu.stratosphere.nephele.executiongraph.ExecutionEdge;
+import eu.stratosphere.nephele.executiongraph.ExecutionGate;
+import eu.stratosphere.nephele.executiongraph.ExecutionGraph;
+import eu.stratosphere.nephele.executiongraph.ExecutionGraphIterator;
+import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertex;
+import eu.stratosphere.nephele.executiongraph.ExecutionGroupVertexIterator;
+import eu.stratosphere.nephele.executiongraph.ExecutionPipeline;
+import eu.stratosphere.nephele.executiongraph.ExecutionStage;
+import eu.stratosphere.nephele.executiongraph.ExecutionStageListener;
+import eu.stratosphere.nephele.executiongraph.ExecutionVertex;
+import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
+import eu.stratosphere.nephele.executiongraph.InternalJobStatus;
+import eu.stratosphere.nephele.executiongraph.JobStatusListener;
+import eu.stratosphere.nephele.instance.AllocatedResource;
+import eu.stratosphere.nephele.instance.AllocationID;
+import eu.stratosphere.nephele.instance.DummyInstance;
+import eu.stratosphere.nephele.instance.InstanceException;
+import eu.stratosphere.nephele.instance.InstanceListener;
+import eu.stratosphere.nephele.instance.InstanceManager;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import eu.stratosphere.nephele.execution.ExecutionState;
+import eu.stratosphere.nephele.instance.Instance;
+import eu.stratosphere.nephele.jobgraph.JobID;
+import eu.stratosphere.nephele.jobmanager.DeploymentManager;
+import eu.stratosphere.util.StringUtils;
+
+/**
+ * The default scheduler for Nephele. While Nephele's
+ * {@link eu.stratosphere.nephele.jobmanager.JobManager} is responsible for requesting the required instances for the
+ * job at the {@link eu.stratosphere.nephele.instance.InstanceManager}, the scheduler is in charge of assigning the
+ * individual tasks to the instances.
+ *
+ */
+public class DefaultScheduler implements InstanceListener, JobStatusListener, ExecutionStageListener {
+
+ /**
+ * The LOG object to report events within the scheduler.
+ */
+ protected static final Log LOG = LogFactory.getLog(DefaultScheduler.class);
+
+ /**
+ * The instance manager assigned to this scheduler.
+ */
+ private final InstanceManager instanceManager;
+
+ /**
+ * The deployment manager assigned to this scheduler.
+ */
+ private final DeploymentManager deploymentManager;
+
+ /**
+ * Stores the vertices to be restarted once they have switched to the <code>CANCELED</code> state.
+ */
+ private final Map<ExecutionVertexID, ExecutionVertex> verticesToBeRestarted = new ConcurrentHashMap<ExecutionVertexID, ExecutionVertex>();
+
+ /**
+ * The job queue to which all submitted jobs are added.
+ */
+ private Deque<ExecutionGraph> jobQueue = new ArrayDeque<ExecutionGraph>();
+
+ /**
+ * Constructs a new default scheduler.
+ *
+ * @param deploymentManager
+ * the deployment manager assigned to this scheduler
+ * @param instanceManager
+ * the instance manager to be used with this scheduler
+ */
+ public DefaultScheduler(final DeploymentManager deploymentManager, final InstanceManager instanceManager) {
+
+ this.deploymentManager = deploymentManager;
+ this.instanceManager = instanceManager;
+ this.instanceManager.setInstanceListener(this);
+ }
+
+ /**
+ * Removes the job represented by the given {@link ExecutionGraph} from the scheduler.
+ *
+ * @param executionGraphToRemove
+ * the job to be removed
+ */
+ void removeJobFromSchedule(final ExecutionGraph executionGraphToRemove) {
+
+ boolean removedFromQueue = false;
+
+ synchronized (this.jobQueue) {
+
+ final Iterator<ExecutionGraph> it = this.jobQueue.iterator();
+ while (it.hasNext()) {
+
+ final ExecutionGraph executionGraph = it.next();
+ if (executionGraph.getJobID().equals(executionGraphToRemove.getJobID())) {
+ removedFromQueue = true;
+ it.remove();
+ break;
+ }
+ }
+ }
+
+ if (!removedFromQueue) {
+ LOG.error("Cannot find job " + executionGraphToRemove.getJobName() + " ("
+ + executionGraphToRemove.getJobID() + ") to remove");
+ }
+ }
+
+ /**
+ * Adds a job represented by an {@link ExecutionGraph} object to the scheduler. The job is then executed according
+ * to the strategies of the concrete scheduler implementation.
+ *
+ * @param executionGraph
+ * the job to be added to the scheduler
+ * @throws SchedulingException
+ * thrown if an error occurs and the scheduler does not accept the new job
+ */
+ public void scheduleJob(final ExecutionGraph executionGraph) throws SchedulingException {
+
+ final int requiredSlots = executionGraph.getRequiredSlots();
+ final int availableSlots = this.getInstanceManager().getNumberOfSlots();
+
+ if(requiredSlots > availableSlots){
+ throw new SchedulingException("Not enough slots to schedule job " + executionGraph.getJobID());
+ }
+
+ // Subscribe to job status notifications
+ executionGraph.registerJobStatusListener(this);
+
+ // Register execution listener for each vertex
+ final ExecutionGraphIterator it2 = new ExecutionGraphIterator(executionGraph, true);
+ while (it2.hasNext()) {
+
+ final ExecutionVertex vertex = it2.next();
+ vertex.registerExecutionListener(new DefaultExecutionListener(this, vertex));
+ }
+
+ // Register the scheduler as an execution stage listener
+ executionGraph.registerExecutionStageListener(this);
+
+ // Add job to the job queue (important to add job to queue before requesting instances)
+ synchronized (this.jobQueue) {
+ this.jobQueue.add(executionGraph);
+ }
+
+ // Request resources for the first stage of the job
+
+ final ExecutionStage executionStage = executionGraph.getCurrentExecutionStage();
+ try {
+ requestInstances(executionStage);
+ } catch (InstanceException e) {
+ final String exceptionMessage = StringUtils.stringifyException(e);
+ LOG.error(exceptionMessage);
+ this.jobQueue.remove(executionGraph);
+ throw new SchedulingException(exceptionMessage);
+ }
+ }
+
+ /**
+ * Returns the execution graph which is associated with the given job ID.
+ *
+ * @param jobID
+ * the job ID to search the execution graph for
+ * @return the execution graph which belongs to the given job ID or <code>null</code> if no such execution graph
+ * exists
+ */
+ public ExecutionGraph getExecutionGraphByID(final JobID jobID) {
+
+ synchronized (this.jobQueue) {
+
+ final Iterator<ExecutionGraph> it = this.jobQueue.iterator();
+ while (it.hasNext()) {
+
+ final ExecutionGraph executionGraph = it.next();
+ if (executionGraph.getJobID().equals(jobID)) {
+ return executionGraph;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Shuts the scheduler down. After shut down no jobs can be added to the scheduler.
+ */
+ public void shutdown() {
+
+ synchronized (this.jobQueue) {
+ this.jobQueue.clear();
+ }
+
+ }
+
+ public void jobStatusHasChanged(final ExecutionGraph executionGraph, final InternalJobStatus newJobStatus,
+ final String optionalMessage) {
+
+ if (newJobStatus == InternalJobStatus.FAILED || newJobStatus == InternalJobStatus.FINISHED
+ || newJobStatus == InternalJobStatus.CANCELED) {
+ removeJobFromSchedule(executionGraph);
+ }
+ }
+
+ public void nextExecutionStageEntered(final JobID jobID, final ExecutionStage executionStage) {
+
+ // Request new instances if necessary
+ try {
+ requestInstances(executionStage);
+ } catch (InstanceException e) {
+ // TODO: Handle error correctly
+ LOG.error(StringUtils.stringifyException(e));
+ }
+
+ // Deploy the assigned vertices
+ deployAssignedInputVertices(executionStage.getExecutionGraph());
+ }
+
+
+ /**
+ * Returns the {@link eu.stratosphere.nephele.instance.InstanceManager} object which is used by the current scheduler.
+ *
+ * @return the {@link eu.stratosphere.nephele.instance.InstanceManager} object which is used by the current scheduler
+ */
+ public InstanceManager getInstanceManager() {
+ return this.instanceManager;
+ }
+
+
+ /**
+ * Collects the instances required to run the job from the given {@link ExecutionStage} and requests them at the
+ * loaded instance manager.
+ *
+ * @param executionStage
+ * the execution stage to collect the required instances from
+ * @throws InstanceException
+ * thrown if the given execution graph is already processing its final stage
+ */
+ protected void requestInstances(final ExecutionStage executionStage) throws InstanceException {
+
+ final ExecutionGraph executionGraph = executionStage.getExecutionGraph();
+
+ synchronized (executionStage) {
+
+ final int requiredSlots = executionStage.getRequiredSlots();
+
+ LOG.info("Requesting " + requiredSlots + " for job " + executionGraph.getJobID());
+
+ this.instanceManager.requestInstance(executionGraph.getJobID(), executionGraph.getJobConfiguration(),
+ requiredSlots);
+
+ // Switch vertex state to assigning
+ final ExecutionGraphIterator it2 = new ExecutionGraphIterator(executionGraph, executionGraph
+ .getIndexOfCurrentExecutionStage(), true, true);
+ while (it2.hasNext()) {
+
+ it2.next().compareAndUpdateExecutionState(ExecutionState.CREATED, ExecutionState.SCHEDULED);
+ }
+ }
+ }
+
+ void findVerticesToBeDeployed(final ExecutionVertex vertex,
+ final Map<Instance, List<ExecutionVertex>> verticesToBeDeployed,
+ final Set<ExecutionVertex> alreadyVisited) {
+
+ if (!alreadyVisited.add(vertex)) {
+ return;
+ }
+
+ if (vertex.compareAndUpdateExecutionState(ExecutionState.ASSIGNED, ExecutionState.READY)) {
+ final Instance instance = vertex.getAllocatedResource().getInstance();
+
+ if (instance instanceof DummyInstance) {
+ LOG.error("Inconsistency: Vertex " + vertex + " is about to be deployed on a DummyInstance");
+ }
+
+ List<ExecutionVertex> verticesForInstance = verticesToBeDeployed.get(instance);
+ if (verticesForInstance == null) {
+ verticesForInstance = new ArrayList<ExecutionVertex>();
+ verticesToBeDeployed.put(instance, verticesForInstance);
+ }
+
+ verticesForInstance.add(vertex);
+ }
+
+ final int numberOfOutputGates = vertex.getNumberOfOutputGates();
+ for (int i = 0; i < numberOfOutputGates; ++i) {
+
+ final ExecutionGate outputGate = vertex.getOutputGate(i);
+ boolean deployTarget;
+
+ switch (outputGate.getChannelType()) {
+ case NETWORK:
+ deployTarget = false;
+ break;
+ case IN_MEMORY:
+ deployTarget = true;
+ break;
+ default:
+ throw new IllegalStateException("Unknown channel type");
+ }
+
+ if (deployTarget) {
+
+ final int numberOfOutputChannels = outputGate.getNumberOfEdges();
+ for (int j = 0; j < numberOfOutputChannels; ++j) {
+ final ExecutionEdge outputChannel = outputGate.getEdge(j);
+ final ExecutionVertex connectedVertex = outputChannel.getInputGate().getVertex();
+ findVerticesToBeDeployed(connectedVertex, verticesToBeDeployed, alreadyVisited);
+ }
+ }
+ }
+ }
+
+ /**
+ * Collects all execution vertices with the state ASSIGNED starting from the given start vertex and
+ * deploys them on the assigned {@link eu.stratosphere.nephele.instance.AllocatedResource} objects.
+ *
+ * @param startVertex
+ * the execution vertex to start the deployment from
+ */
+ public void deployAssignedVertices(final ExecutionVertex startVertex) {
+
+ final JobID jobID = startVertex.getExecutionGraph().getJobID();
+
+ final Map<Instance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<Instance, List<ExecutionVertex>>();
+ final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
+
+ findVerticesToBeDeployed(startVertex, verticesToBeDeployed, alreadyVisited);
+
+ if (!verticesToBeDeployed.isEmpty()) {
+
+ final Iterator<Map.Entry<Instance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
+ .entrySet()
+ .iterator();
+
+ while (it2.hasNext()) {
+
+ final Map.Entry<Instance, List<ExecutionVertex>> entry = it2.next();
+ this.deploymentManager.deploy(jobID, entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+ /**
+ * Collects all execution vertices with the state ASSIGNED from the given pipeline and deploys them on the assigned
+ * {@link eu.stratosphere.nephele.instance.AllocatedResource} objects.
+ *
+ * @param pipeline
+ * the execution pipeline to be deployed
+ */
+ public void deployAssignedPipeline(final ExecutionPipeline pipeline) {
+
+ final JobID jobID = null;
+
+ final Map<Instance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<Instance, List<ExecutionVertex>>();
+ final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
+
+ final Iterator<ExecutionVertex> it = pipeline.iterator();
+ while (it.hasNext()) {
+ findVerticesToBeDeployed(it.next(), verticesToBeDeployed, alreadyVisited);
+ }
+
+ if (!verticesToBeDeployed.isEmpty()) {
+
+ final Iterator<Map.Entry<Instance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
+ .entrySet()
+ .iterator();
+
+ while (it2.hasNext()) {
+
+ final Map.Entry<Instance, List<ExecutionVertex>> entry = it2.next();
+ this.deploymentManager.deploy(jobID, entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+ /**
+ * Collects all execution vertices with the state ASSIGNED starting from the given collection of start vertices and
+ * deploys them on the assigned {@link eu.stratosphere.nephele.instance.AllocatedResource} objects.
+ *
+ * @param startVertices
+ * the collection of execution vertices to start the deployment from
+ */
+ public void deployAssignedVertices(final Collection<ExecutionVertex> startVertices) {
+
+ JobID jobID = null;
+
+ final Map<Instance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<Instance, List<ExecutionVertex>>();
+ final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
+
+ for (final ExecutionVertex startVertex : startVertices) {
+
+ if (jobID == null) {
+ jobID = startVertex.getExecutionGraph().getJobID();
+ }
+
+ findVerticesToBeDeployed(startVertex, verticesToBeDeployed, alreadyVisited);
+ }
+
+ if (!verticesToBeDeployed.isEmpty()) {
+
+ final Iterator<Map.Entry<Instance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
+ .entrySet()
+ .iterator();
+
+ while (it2.hasNext()) {
+
+ final Map.Entry<Instance, List<ExecutionVertex>> entry = it2.next();
+ this.deploymentManager.deploy(jobID, entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+ /**
+ * Collects all execution vertices with the state ASSIGNED starting from the input vertices of the current execution
+ * stage and deploys them on the assigned {@link eu.stratosphere.nephele.instance.AllocatedResource} objects.
+ *
+ * @param executionGraph
+ * the execution graph to collect the vertices from
+ */
+ public void deployAssignedInputVertices(final ExecutionGraph executionGraph) {
+
+ final Map<Instance, List<ExecutionVertex>> verticesToBeDeployed = new HashMap<Instance, List<ExecutionVertex>>();
+ final ExecutionStage executionStage = executionGraph.getCurrentExecutionStage();
+
+ final Set<ExecutionVertex> alreadyVisited = new HashSet<ExecutionVertex>();
+
+ for (int i = 0; i < executionStage.getNumberOfStageMembers(); ++i) {
+
+ final ExecutionGroupVertex startVertex = executionStage.getStageMember(i);
+ if (!startVertex.isInputVertex()) {
+ continue;
+ }
+
+ for (int j = 0; j < startVertex.getCurrentNumberOfGroupMembers(); ++j) {
+ final ExecutionVertex vertex = startVertex.getGroupMember(j);
+ findVerticesToBeDeployed(vertex, verticesToBeDeployed, alreadyVisited);
+ }
+ }
+
+ if (!verticesToBeDeployed.isEmpty()) {
+
+ final Iterator<Map.Entry<Instance, List<ExecutionVertex>>> it2 = verticesToBeDeployed
+ .entrySet()
+ .iterator();
+
+ while (it2.hasNext()) {
+
+ final Map.Entry<Instance, List<ExecutionVertex>> entry = it2.next();
+ this.deploymentManager.deploy(executionGraph.getJobID(), entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+
+ @Override
+ public void resourcesAllocated(final JobID jobID, final List<AllocatedResource> allocatedResources) {
+
+ if (allocatedResources == null) {
+ LOG.error("Resource to lock is null!");
+ return;
+ }
+
+ for (final AllocatedResource allocatedResource : allocatedResources) {
+ if (allocatedResource.getInstance() instanceof DummyInstance) {
+ LOG.debug("Available instance is of type DummyInstance!");
+ return;
+ }
+ }
+
+ final ExecutionGraph eg = getExecutionGraphByID(jobID);
+
+ if (eg == null) {
+ /*
+ * The job may have been canceled in the meantime; in this case
+ * we release the instances immediately.
+ */
+ try {
+ for (final AllocatedResource allocatedResource : allocatedResources) {
+ getInstanceManager().releaseAllocatedResource(allocatedResource);
+ }
+ } catch (InstanceException e) {
+ LOG.error(e);
+ }
+ return;
+ }
+
+ final Runnable command = new Runnable() {
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void run() {
+
+ final ExecutionStage stage = eg.getCurrentExecutionStage();
+
+ synchronized (stage) {
+
+ for (final AllocatedResource allocatedResource : allocatedResources) {
+
+ AllocatedResource resourceToBeReplaced = null;
+ // Important: only look for instances to be replaced in the current stage
+ final Iterator<ExecutionGroupVertex> groupIterator = new ExecutionGroupVertexIterator(eg, true,
+ stage.getStageNumber());
+ while (groupIterator.hasNext()) {
+
+ final ExecutionGroupVertex groupVertex = groupIterator.next();
+ for (int i = 0; i < groupVertex.getCurrentNumberOfGroupMembers(); ++i) {
+
+ final ExecutionVertex vertex = groupVertex.getGroupMember(i);
+
+ if (vertex.getExecutionState() == ExecutionState.SCHEDULED
+ && vertex.getAllocatedResource() != null) {
+ resourceToBeReplaced = vertex.getAllocatedResource();
+ break;
+ }
+ }
+
+ if (resourceToBeReplaced != null) {
+ break;
+ }
+ }
+
+ // No scheduled vertex in the current stage requires this instance, so return it to the instance manager
+ if (resourceToBeReplaced == null) {
+ LOG.error("Instance " + allocatedResource.getInstance() + " is not required for job"
+ + eg.getJobID());
+ try {
+ getInstanceManager().releaseAllocatedResource(allocatedResource);
+ } catch (InstanceException e) {
+ LOG.error(e);
+ }
+ return;
+ }
+
+ // Replace the selected instance
+ final Iterator<ExecutionVertex> it = resourceToBeReplaced.assignedVertices();
+ while (it.hasNext()) {
+ final ExecutionVertex vertex = it.next();
+ vertex.setAllocatedResource(allocatedResource);
+ vertex.updateExecutionState(ExecutionState.ASSIGNED);
+ }
+ }
+ }
+
+ // Deploy the assigned vertices
+ deployAssignedInputVertices(eg);
+
+ }
+
+ };
+
+ eg.executeCommand(command);
+ }
+
+ /**
+ * Checks if the given {@link AllocatedResource} is still required for the
+ * execution of the given execution graph. If the resource is no longer
+ * assigned to a vertex that is either currently running or about to run
+ * the given resource is returned to the instance manager for deallocation.
+ *
+ * @param executionGraph
+ * the execution graph the provided resource has been used for so far
+ * @param allocatedResource
+ * the allocated resource to check the assignment for
+ */
+ public void checkAndReleaseAllocatedResource(final ExecutionGraph executionGraph,
+ final AllocatedResource allocatedResource) {
+
+ if (allocatedResource == null) {
+ LOG.error("Resource to lock is null!");
+ return;
+ }
+
+ if (allocatedResource.getInstance() instanceof DummyInstance) {
+ LOG.debug("Available instance is of type DummyInstance!");
+ return;
+ }
+
+ boolean resourceCanBeReleased = true;
+ final Iterator<ExecutionVertex> it = allocatedResource.assignedVertices();
+ while (it.hasNext()) {
+ final ExecutionVertex vertex = it.next();
+ final ExecutionState state = vertex.getExecutionState();
+
+ if (state != ExecutionState.CREATED && state != ExecutionState.FINISHED
+ && state != ExecutionState.FAILED && state != ExecutionState.CANCELED) {
+
+ resourceCanBeReleased = false;
+ break;
+ }
+ }
+
+ if (resourceCanBeReleased) {
+
+ LOG.info("Releasing instance " + allocatedResource.getInstance());
+ try {
+ getInstanceManager().releaseAllocatedResource(allocatedResource);
+ } catch (InstanceException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ }
+ }
+
+ DeploymentManager getDeploymentManager() {
+ return this.deploymentManager;
+ }
+
+ protected void replayCheckpointsFromPreviousStage(final ExecutionGraph executionGraph) {
+
+ final int currentStageIndex = executionGraph.getIndexOfCurrentExecutionStage();
+ final ExecutionStage previousStage = executionGraph.getStage(currentStageIndex - 1);
+
+ final List<ExecutionVertex> verticesToBeReplayed = new ArrayList<ExecutionVertex>();
+
+ for (int i = 0; i < previousStage.getNumberOfOutputExecutionVertices(); ++i) {
+
+ final ExecutionVertex vertex = previousStage.getOutputExecutionVertex(i);
+ vertex.updateExecutionState(ExecutionState.ASSIGNED);
+ verticesToBeReplayed.add(vertex);
+ }
+
+ deployAssignedVertices(verticesToBeReplayed);
+ }
+
+ /**
+ * Returns a map of vertices to be restarted once they have switched to their <code>CANCELED</code> state.
+ *
+ * @return the map of vertices to be restarted
+ */
+ Map<ExecutionVertexID, ExecutionVertex> getVerticesToBeRestarted() {
+
+ return this.verticesToBeRestarted;
+ }
+
+
+ @Override
+ public void allocatedResourcesDied(final JobID jobID, final List<AllocatedResource> allocatedResources) {
+
+ final ExecutionGraph eg = getExecutionGraphByID(jobID);
+
+ if (eg == null) {
+ LOG.error("Cannot find execution graph for job with ID " + jobID);
+ return;
+ }
+
+ final Runnable command = new Runnable() {
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void run() {
+
+ synchronized (eg) {
+
+ for (final AllocatedResource allocatedResource : allocatedResources) {
+
+ LOG.info("Resource " + allocatedResource.getInstance().getName() + " for Job " + jobID
+ + " died.");
+
+ final ExecutionGraph executionGraph = getExecutionGraphByID(jobID);
+
+ if (executionGraph == null) {
+ LOG.error("Cannot find execution graph for job " + jobID);
+ return;
+ }
+
+ Iterator<ExecutionVertex> vertexIter = allocatedResource.assignedVertices();
+
+ // Assign vertices back to a dummy resource.
+ final DummyInstance dummyInstance = DummyInstance.createDummyInstance();
+ final AllocatedResource dummyResource = new AllocatedResource(dummyInstance,
+ new AllocationID());
+
+ while (vertexIter.hasNext()) {
+ final ExecutionVertex vertex = vertexIter.next();
+ vertex.setAllocatedResource(dummyResource);
+ }
+
+ final String failureMessage = allocatedResource.getInstance().getName() + " died";
+
+ vertexIter = allocatedResource.assignedVertices();
+
+ while (vertexIter.hasNext()) {
+ final ExecutionVertex vertex = vertexIter.next();
+ final ExecutionState state = vertex.getExecutionState();
+
+ switch (state) {
+ case ASSIGNED:
+ case READY:
+ case STARTING:
+ case RUNNING:
+ case FINISHING:
+
+ vertex.updateExecutionState(ExecutionState.FAILED, failureMessage);
+
+ break;
+ default:
+ }
+ }
+
+ // TODO: Fix this
+ /*
+ * try {
+ * requestInstances(this.executionVertex.getGroupVertex().getExecutionStage());
+ * } catch (InstanceException e) {
+ * e.printStackTrace();
+ * // TODO: Cancel the entire job in this case
+ * }
+ */
+ }
+ }
+
+ final InternalJobStatus js = eg.getJobStatus();
+ if (js != InternalJobStatus.FAILING && js != InternalJobStatus.FAILED) {
+
+ // TODO: Fix this
+ // deployAssignedVertices(eg);
+
+ final ExecutionStage stage = eg.getCurrentExecutionStage();
+
+ try {
+ requestInstances(stage);
+ } catch (InstanceException e) {
+ e.printStackTrace();
+ // TODO: Cancel the entire job in this case
+ }
+ }
+ }
+ };
+
+ eg.executeCommand(command);
+ }
+}
[14/22] git commit: Removed RuntimeEnvironment instantiation from
execution graph construction. Removed legacy job vertex classes and
input/output tasks.
Posted by se...@apache.org.
Removed RuntimeEnvironment instantiation from execution graph construction. Removed legacy job vertex classes and input/output tasks.
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/ea79186b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/ea79186b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/ea79186b
Branch: refs/heads/master
Commit: ea79186b7ef787991fa1c4dbfa29f26c7aefd804
Parents: 429493d
Author: Till Rohrmann <ti...@mailbox.tu-berlin.de>
Authored: Wed Mar 26 02:58:15 2014 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:20 2014 +0200
----------------------------------------------------------------------
.../plantranslate/NepheleJobGraphGenerator.java | 33 +--
.../api/common/io/FileOutputFormat.java | 38 ++++
.../api/common/io/OutputFormat.java | 7 +
.../configuration/Configuration.java | 12 ++
.../api/java/io/PrintingOutputFormat.java | 3 +
.../nephele/execution/RuntimeEnvironment.java | 56 ++----
.../nephele/executiongraph/ExecutionGraph.java | 86 ++------
.../executiongraph/ExecutionGroupVertex.java | 53 ++---
.../jobgraph/AbstractJobInputVertex.java | 7 +
.../nephele/jobgraph/AbstractJobVertex.java | 62 +-----
.../nephele/jobgraph/JobFileInputVertex.java | 195 ------------------
.../nephele/jobgraph/JobFileOutputVertex.java | 198 ------------------
.../nephele/jobgraph/JobGenericInputVertex.java | 168 ----------------
.../jobgraph/JobGenericOutputVertex.java | 182 -----------------
.../nephele/jobgraph/JobInputVertex.java | 90 ++++++++-
.../nephele/jobgraph/JobOutputVertex.java | 56 ++++++
.../nephele/jobgraph/JobTaskVertex.java | 17 --
.../splitassigner/InputSplitManager.java | 13 +-
.../LocatableInputSplitAssigner.java | 14 +-
.../file/FileInputSplitAssigner.java | 14 +-
.../nephele/template/AbstractFileInputTask.java | 201 -------------------
.../template/AbstractFileOutputTask.java | 46 -----
.../template/AbstractGenericInputTask.java | 39 ----
.../nephele/template/AbstractInputTask.java | 23 ---
.../nephele/template/AbstractInvokable.java | 33 ---
.../nephele/template/GenericInputTask.java | 39 ----
.../pact/runtime/task/DataSinkTask.java | 58 ------
.../pact/runtime/task/DataSourceTask.java | 60 +++---
.../pact/runtime/task/util/TaskConfig.java | 4 +
.../TaskDeploymentDescriptorTest.java | 8 +-
.../executiongraph/SelfCrossInputTask.java | 41 ----
.../nephele/jobmanager/DoubleSourceTask.java | 81 --------
.../nephele/jobmanager/DoubleTargetTask.java | 18 +-
.../nephele/jobmanager/ForwardTask.java | 12 +-
.../nephele/jobmanager/UnionTask.java | 16 +-
.../scheduler/queue/DefaultSchedulerTest.java | 43 ++++
.../nephele/util/FileLineReader.java | 80 --------
.../nephele/util/FileLineWriter.java | 75 -------
.../io/library/FileLineReadWriteTest.java | 136 -------------
.../recordJobs/util/DiscardingOutputFormat.java | 3 +
40 files changed, 412 insertions(+), 1908 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
index b4c7560..200ef7c 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/plantranslate/NepheleJobGraphGenerator.java
@@ -20,7 +20,14 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
+
+import eu.stratosphere.api.common.io.InputFormat;
+import eu.stratosphere.api.common.io.OutputFormat;
+import eu.stratosphere.api.common.operators.util.UserCodeWrapper;
+import eu.stratosphere.core.io.InputSplit;
+import eu.stratosphere.nephele.template.AbstractInputTask;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import eu.stratosphere.api.common.aggregators.AggregatorRegistry;
import eu.stratosphere.api.common.aggregators.AggregatorWithName;
@@ -805,31 +812,31 @@ public class NepheleJobGraphGenerator implements Visitor<PlanNode> {
private JobInputVertex createDataSourceVertex(SourcePlanNode node) throws CompilerException {
final JobInputVertex vertex = new JobInputVertex(node.getNodeName(), this.jobGraph);
- final TaskConfig config = new TaskConfig(vertex.getConfiguration());
-
+
// set task class
@SuppressWarnings("unchecked")
- final Class<AbstractInputTask<?>> clazz = (Class<AbstractInputTask<?>>) (Class<?>) DataSourceTask.class;
+ final Class<AbstractInputTask<?>> clazz = (Class<AbstractInputTask<?>>) (Class<?>) DataSourceTask
+ .class;
vertex.setInputClass(clazz);
// set user code
- config.setStubWrapper(node.getPactContract().getUserCodeWrapper());
- config.setStubParameters(node.getPactContract().getParameters());
-
- config.setOutputSerializer(node.getSerializer());
+ vertex.setInputFormat((UserCodeWrapper<? extends InputFormat<?, InputSplit>>)node.getPactContract()
+ .getUserCodeWrapper());
+ vertex.setInputFormatParameters(node.getPactContract().getParameters());
+ vertex.setOutputSerializer(node.getSerializer());
return vertex;
}
private AbstractJobOutputVertex createDataSinkVertex(SinkPlanNode node) throws CompilerException {
final JobOutputVertex vertex = new JobOutputVertex(node.getNodeName(), this.jobGraph);
- final TaskConfig config = new TaskConfig(vertex.getConfiguration());
-
+
vertex.setOutputClass(DataSinkTask.class);
vertex.getConfiguration().setInteger(DataSinkTask.DEGREE_OF_PARALLELISM_KEY, node.getDegreeOfParallelism());
-
+
// set user code
- config.setStubWrapper(node.getPactContract().getUserCodeWrapper());
- config.setStubParameters(node.getPactContract().getParameters());
+ vertex.setOutputFormat((UserCodeWrapper<? extends OutputFormat<?>>)node.getPactContract().getUserCodeWrapper
+ ());
+ vertex.setOutputFormatParameters(node.getPactContract().getParameters());
return vertex;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
index b04ced9..7733c71 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/FileOutputFormat.java
@@ -437,4 +437,42 @@ public abstract class FileOutputFormat<IT> implements OutputFormat<IT> {
super(targetConfig);
}
}
+
+ @Override
+ public void initialize(Configuration configuration){
+ final Path path = this.getOutputFilePath();
+ final WriteMode writeMode = this.getWriteMode();
+ final OutputDirectoryMode outDirMode = this.getOutputDirectoryMode();
+
+ // Prepare output path and determine max DOP
+ try {
+ final FileSystem fs = path.getFileSystem();
+
+ int dop = configuration.getInteger(DEGREE_OF_PARALLELISM_KEY, -1);
+ if(dop == 1 && outDirMode == OutputDirectoryMode.PARONLY) {
+ // output is not written in parallel and should be written to a single file.
+
+ if(fs.isDistributedFS()) {
+ // prepare distributed output path
+ if(!fs.initOutPathDistFS(path, writeMode, false)) {
+ // output preparation failed! Cancel task.
+ throw new IOException("Output path could not be initialized.");
+ }
+ }
+ } else {
+ // output should be written to a directory
+
+ if(fs.isDistributedFS()) {
+ // only distributed file systems can be initialized at start-up time.
+ if(!fs.initOutPathDistFS(path, writeMode, true)) {
+ throw new IOException("Output directory could not be created.");
+ }
+ }
+ }
+ }
+ catch (IOException e) {
+ LOG.error("Could not access the file system to determine the status of the output.", e);
+ throw new RuntimeException("I/O Error while accessing file", e);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
index c32be78..3b66902 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/api/common/io/OutputFormat.java
@@ -79,5 +79,12 @@ public interface OutputFormat<IT> extends Serializable {
* @throws IOException Thrown, if the output could not be closed properly.
*/
void close() throws IOException;
+
+ /**
+ * Method which is called on the master node prior to execution. It can be used to set up the output format.
+ *
+ * @param configuration The task configuration
+ */
+ void initialize(Configuration configuration);
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
index 46cadc3..0271b59 100644
--- a/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
+++ b/stratosphere-core/src/main/java/eu/stratosphere/configuration/Configuration.java
@@ -405,6 +405,18 @@ public class Configuration implements IOReadableWritable {
}
}
}
+
+ /**
+ * Checks whether the configuration contains an entry with the given key.
+ *
+ * @param key key of the entry
+ * @return true if an entry with the given key is stored in the configuration, otherwise false
+ */
+ public boolean containsKey(String key){
+ synchronized (this.confData){
+ return this.confData.containsKey(key);
+ }
+ }
// --------------------------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java b/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
index 5c09439..d1736d4 100644
--- a/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
+++ b/stratosphere-java/src/main/java/eu/stratosphere/api/java/io/PrintingOutputFormat.java
@@ -95,4 +95,7 @@ public class PrintingOutputFormat<T> implements OutputFormat<T> {
public String toString() {
return "Print to " + (target == STD_OUT ? "System.out" : "System.err");
}
+
+ @Override
+ public void initialize(Configuration configuration){}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
index 4e07694..70718a9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/execution/RuntimeEnvironment.java
@@ -83,6 +83,12 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
private final List<InputGate<? extends IOReadableWritable>> inputGates = new CopyOnWriteArrayList<InputGate<? extends IOReadableWritable>>();
/**
+ * Queue of unbound output gate IDs which are required for deserializing an environment in the course of an RPC
+ * call.
+ */
+ private final Queue<GateID> unboundOutputGateIDs = new ArrayDeque<GateID>();
+
+ /**
* Queue of unbound input gate IDs which are required for deserializing an environment in the course of an RPC
* call.
*/
@@ -165,46 +171,18 @@ public class RuntimeEnvironment implements Environment, BufferProvider, LocalBuf
private volatile boolean canceled;
/**
- * Creates a new runtime environment object which contains the runtime information for the encapsulated Nephele
- * task.
- *
- * @param jobID the ID of the original Nephele job
- * @param taskName the name of task running in this environment
- * @param invokableClass invokableClass the class that should be instantiated as a Nephele task
- * @param taskConfiguration the configuration object which was attached to the original JobVertex
- * @param jobConfiguration the configuration object which was attached to the original JobGraph
- * @throws Exception thrown if an error occurs while instantiating the invokable class
- */
- public RuntimeEnvironment(final JobID jobID, final String taskName,
- final Class<? extends AbstractInvokable> invokableClass, final Configuration taskConfiguration,
- final Configuration jobConfiguration)
- throws Exception
- {
- this.jobID = jobID;
- this.taskName = taskName;
- this.invokableClass = invokableClass;
- this.taskConfiguration = taskConfiguration;
- this.jobConfiguration = jobConfiguration;
- this.indexInSubtaskGroup = 0;
- this.currentNumberOfSubtasks = 0;
- this.memoryManager = null;
- this.ioManager = null;
- this.inputSplitProvider = null;
- this.cacheCopyTasks = new HashMap<String, FutureTask<Path>>();
-
- this.invokable = this.invokableClass.newInstance();
- this.invokable.setEnvironment(this);
- this.invokable.registerInputOutput();
- }
-
- /**
* Constructs a runtime environment from a task deployment description.
- *
- * @param tdd the task deployment description
- * @param memoryManager the task manager's memory manager component
- * @param ioManager the task manager's I/O manager component
- * @param inputSplitProvider the input split provider for this environment
- * @throws Exception thrown if an error occurs while instantiating the invokable class
+ *
+ * @param tdd
+ * the task deployment description
+ * @param memoryManager
+ * the task manager's memory manager component
+ * @param ioManager
+ * the task manager's I/O manager component
+ * @param inputSplitProvider
+ * the input split provider for this environment
+ * @throws Exception
+ * thrown if an error occurs while instantiating the invokable class
*/
public RuntimeEnvironment(final TaskDeploymentDescriptor tdd,
final MemoryManager memoryManager, final IOManager ioManager,
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
index c5059f9..93e0a25 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
@@ -399,17 +399,6 @@ public class ExecutionGraph implements ExecutionListener {
final ExecutionVertex sev = entry.getValue();
final ExecutionGroupVertex sgv = sev.getGroupVertex();
- // First compare number of output gates
- if (sjv.getNumberOfForwardConnections() != sgv.getEnvironment().getNumberOfOutputGates()) {
- throw new GraphConversionException("Job and execution vertex " + sjv.getName()
- + " have different number of outputs");
- }
-
- if (sjv.getNumberOfBackwardConnections() != sgv.getEnvironment().getNumberOfInputGates()) {
- throw new GraphConversionException("Job and execution vertex " + sjv.getName()
- + " have different number of inputs");
- }
-
// First, build the group edges
for (int i = 0; i < sjv.getNumberOfForwardConnections(); ++i) {
final JobEdge edge = sjv.getForwardConnection(i);
@@ -488,16 +477,13 @@ public class ExecutionGraph implements ExecutionListener {
final InputSplit[] inputSplits;
- // let the task code compute the input splits
- if (groupVertex.getEnvironment().getInvokable() instanceof AbstractInputTask) {
- try {
- inputSplits = ((AbstractInputTask<?>) groupVertex.getEnvironment().getInvokable())
- .computeInputSplits(jobVertex.getNumberOfSubtasks());
- } catch (Exception e) {
- throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName(), e);
- }
- } else {
- throw new GraphConversionException("JobInputVertex contained a task class which was not an input task.");
+ final Class<? extends InputSplit> inputSplitType = jobInputVertex.getInputSplitType();
+
+ try{
+ inputSplits = jobInputVertex.getInputSplits(jobVertex.getNumberOfSubtasks());
+ }catch(Exception e) {
+ throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName() + ": "
+ + StringUtils.stringifyException(e));
}
if (inputSplits == null) {
@@ -507,13 +493,19 @@ public class ExecutionGraph implements ExecutionListener {
+ " input splits");
}
- // assign input splits
+ // assign input splits and type
groupVertex.setInputSplits(inputSplits);
+ groupVertex.setInputSplitType(inputSplitType);
}
- // TODO: This is a quick workaround, problem can be solved in a more generic way
- if (jobVertex instanceof JobFileOutputVertex) {
- final JobFileOutputVertex jbov = (JobFileOutputVertex) jobVertex;
- jobVertex.getConfiguration().setString("outputPath", jbov.getFilePath().toString());
+
+ if(jobVertex instanceof JobOutputVertex){
+ final JobOutputVertex jobOutputVertex = (JobOutputVertex) jobVertex;
+
+ final OutputFormat<?> outputFormat = jobOutputVertex.getOutputFormat();
+
+ if(outputFormat != null){
+ outputFormat.initialize(groupVertex.getConfiguration());
+ }
}
// Add group vertex to initial execution stage
@@ -796,48 +788,6 @@ public class ExecutionGraph implements ExecutionListener {
}
/**
- * Retrieves the maximum parallel degree of the job represented by this execution graph
- */
- public int getMaxNumberSubtasks() {
- int maxDegree = 0;
- final Iterator<ExecutionStage> stageIterator = this.stages.iterator();
-
- while(stageIterator.hasNext()){
- final ExecutionStage stage = stageIterator.next();
-
- int maxPerStageDegree = stage.getMaxNumberSubtasks();
-
- if(maxPerStageDegree > maxDegree){
- maxDegree = maxPerStageDegree;
- }
- }
-
- return maxDegree;
- }
-
- /**
- * Retrieves the number of required slots to run this execution graph
- * @return
- */
- public int getRequiredSlots(){
- int maxRequiredSlots = 0;
-
- final Iterator<ExecutionStage> stageIterator = this.stages.iterator();
-
- while(stageIterator.hasNext()){
- final ExecutionStage stage = stageIterator.next();
-
- int requiredSlots = stage.getRequiredSlots();
-
- if(requiredSlots > maxRequiredSlots){
- maxRequiredSlots = requiredSlots;
- }
- }
-
- return maxRequiredSlots;
- }
-
- /**
* Returns the stage which is currently executed.
*
* @return the currently executed stage or <code>null</code> if the job execution is already completed
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
index c865609..91e9e53 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGroupVertex.java
@@ -114,6 +114,11 @@ public final class ExecutionGroupVertex {
private volatile InputSplit[] inputSplits = null;
/**
+ * Input split type
+ */
+ private volatile Class<? extends InputSplit> inputSplitType = null;
+
+ /**
* The execution stage this vertex belongs to.
*/
private volatile ExecutionStage executionStage = null;
@@ -129,11 +134,6 @@ public final class ExecutionGroupVertex {
private final Class<? extends AbstractInvokable> invokableClass;
/**
- * The environment created to execute the vertex's task.
- */
- private final RuntimeEnvironment environment;
-
- /**
* Constructs a new group vertex.
*
* @param name
@@ -177,9 +177,6 @@ public final class ExecutionGroupVertex {
this.executionSignature = signature;
this.invokableClass = invokableClass;
-
- this.environment = new RuntimeEnvironment(executionGraph.getJobID(), name, invokableClass, configuration,
- executionGraph.getJobConfiguration());
}
/**
@@ -192,16 +189,6 @@ public final class ExecutionGroupVertex {
}
/**
- * Returns the environment of the instantiated {@link AbstractInvokable} object.
- *
- * @return the environment of the instantiated {@link AbstractInvokable} object
- */
- public RuntimeEnvironment getEnvironment() {
-
- return this.environment;
- }
-
- /**
* Sets the execution stage this group vertex is associated with.
*
* @param executionStage
@@ -407,20 +394,6 @@ public final class ExecutionGroupVertex {
}
}
- // Make sure the value of newNumber is valid
- // TODO: Move these checks to some other place
- /*
- * if (this.getMinimumNumberOfGroupMember() < 1) {
- * throw new GraphConversionException("The minimum number of members is below 1 for group vertex "
- * + this.getName());
- * }
- * if ((this.getMaximumNumberOfGroupMembers() != -1)
- * && (this.getMaximumNumberOfGroupMembers() < this.getMinimumNumberOfGroupMember())) {
- * throw new GraphConversionException(
- * "The maximum number of members is smaller than the minimum for group vertex " + this.getName());
- * }
- */
-
final ExecutionVertex originalVertex = this.getGroupMember(0);
int currentNumberOfExecutionVertices = this.getCurrentNumberOfGroupMembers();
@@ -453,6 +426,14 @@ public final class ExecutionGroupVertex {
}
/**
+ * Sets the input split type class
+ *
+ * @param inputSplitType Input split type class
+ */
+ public void setInputSplitType(final Class<? extends InputSplit> inputSplitType) { this.inputSplitType =
+ inputSplitType; }
+
+ /**
* Returns the input splits assigned to this group vertex.
*
* @return the input splits, possibly <code>null</code> if the group vertex does not represent an input vertex
@@ -462,6 +443,14 @@ public final class ExecutionGroupVertex {
return this.inputSplits;
}
+ /**
+ * Returns the input split type class
+ *
+ * @return the input split type class, possibly <code>null</code> if the group vertex does not represent an input
+ * vertex
+ */
+ public Class<? extends InputSplit> getInputSplitType() { return this.inputSplitType; }
+
public ExecutionGroupEdge getForwardEdge(int index) {
if (index < 0) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
index 958ed9d..22b4d7c 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobInputVertex.java
@@ -13,6 +13,10 @@
package eu.stratosphere.nephele.jobgraph;
+import eu.stratosphere.core.io.InputSplit;
+
+import java.io.IOException;
+
/**
* An abstract base class for input vertices in Nephele.
*
@@ -34,4 +38,7 @@ public abstract class AbstractJobInputVertex extends AbstractJobVertex {
jobGraph.addVertex(this);
}
+
+ public abstract Class<? extends InputSplit> getInputSplitType();
+ public abstract InputSplit[] getInputSplits(int minNumSplits) throws Exception;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
index d64c622..7cec46a 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/AbstractJobVertex.java
@@ -573,65 +573,15 @@ public abstract class AbstractJobVertex implements IOReadableWritable {
}
/**
- * Performs task specific checks if the
- * respective task has been configured properly.
- *
- * @param invokable
- * an instance of the task this vertex represents
+ * Performs check whether the vertex has been properly configured
+ *
+ * @param configuration
+ * configuration of this vertex
* @throws IllegalConfigurationException
* thrown if the respective task is not configured properly
*/
- public void checkConfiguration(final AbstractInvokable invokable) throws IllegalConfigurationException {
-
- if (invokable == null) {
- throw new IllegalArgumentException("Argument invokable is null");
- }
-
- // see if the task itself has a valid configuration
- // because this is user code running on the master, we embed it in a catch-all block
- try {
- invokable.checkConfiguration();
- } catch (IllegalConfigurationException icex) {
- throw icex; // simply forward
- } catch (Throwable t) {
- throw new IllegalConfigurationException("Checking the invokable's configuration caused an error: "
- + StringUtils.stringifyException(t));
- }
- }
-
- /**
- * Returns the minimum number of subtasks the respective task
- * must be split into at runtime.
- *
- * @param invokable
- * an instance of the task this vertex represents
- * @return the minimum number of subtasks the respective task must be split into at runtime
- */
- public int getMinimumNumberOfSubtasks(final AbstractInvokable invokable) {
-
- if (invokable == null) {
- throw new IllegalArgumentException("Argument invokable is null");
- }
-
- return invokable.getMinimumNumberOfSubtasks();
- }
-
- /**
- * Returns the maximum number of subtasks the respective task
- * can be split into at runtime.
- *
- * @param invokable
- * an instance of the task this vertex represents
- * @return the maximum number of subtasks the respective task can be split into at runtime, <code>-1</code> for
- * infinity
- */
- public int getMaximumNumberOfSubtasks(final AbstractInvokable invokable) {
-
- if (invokable == null) {
- throw new IllegalArgumentException("Argument invokable is null");
- }
-
- return invokable.getMaximumNumberOfSubtasks();
+ public void checkConfiguration(final Configuration configuration) throws IllegalConfigurationException {
+ //default configuration check
}
/**
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileInputVertex.java
deleted file mode 100644
index 65685ee..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileInputVertex.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobgraph;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import eu.stratosphere.configuration.IllegalConfigurationException;
-import eu.stratosphere.core.fs.FileStatus;
-import eu.stratosphere.core.fs.FileSystem;
-import eu.stratosphere.core.fs.Path;
-import eu.stratosphere.nephele.template.AbstractFileInputTask;
-import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.util.StringUtils;
-
-/**
- * A JobFileInputVertex is a specific subtype of a {@link AbstractJobInputVertex} and is designed
- * for Nephele tasks which read data from a local or distributed file system. As every job input vertex
- * A JobFileInputVertex must not have any further input.
- *
- */
-public final class JobFileInputVertex extends AbstractJobInputVertex {
-
- /**
- * The path pointing to the input file/directory.
- */
- private Path path = null;
-
- /**
- * Creates a new job file input vertex with the specified name.
- *
- * @param name
- * the name of the new job file input vertex
- * @param id
- * the ID of this vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobFileInputVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
- super(name, id, jobGraph);
- }
-
- /**
- * Creates a new job file input vertex with the specified name.
- *
- * @param name
- * the name of the new job file input vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobFileInputVertex(final String name, final JobGraph jobGraph) {
- super(name, null, jobGraph);
- }
-
- /**
- * Creates a new job file input vertex.
- *
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobFileInputVertex(final JobGraph jobGraph) {
- super(null, null, jobGraph);
- }
-
- /**
- * Sets the path of the file the job file input vertex's task should read from.
- *
- * @param path
- * the path of the file the job file input vertex's task should read from
- */
- public void setFilePath(final Path path) {
- this.path = path;
- }
-
- /**
- * Returns the path of the file the job file input vertex's task should read from.
- *
- * @return the path of the file the job file input vertex's task should read from or <code>null</code> if no path
- * has yet been set
- */
- public Path getFilePath() {
- return this.path;
- }
-
- /**
- * Sets the class of the vertex's input task.
- *
- * @param inputClass
- * the class of the vertex's input task.
- */
- public void setFileInputClass(final Class<? extends AbstractFileInputTask> inputClass) {
- this.invokableClass = inputClass;
- }
-
- /**
- * Returns the class of the vertex's input task.
- *
- * @return the class of the vertex's input task or <code>null</code> if no task has yet been set
- */
- @SuppressWarnings("unchecked")
- public Class<? extends AbstractFileInputTask> getFileInputClass() {
- return (Class<? extends AbstractFileInputTask>) this.invokableClass;
- }
-
-
- @Override
- public void read(final DataInput in) throws IOException {
- super.read(in);
-
- // Read path of the input file
- final boolean isNotNull = in.readBoolean();
- if (isNotNull) {
- this.path = new Path();
- this.path.read(in);
- }
- }
-
-
- @Override
- public void write(final DataOutput out) throws IOException {
- super.write(out);
-
- // Write out the path of the input file
- if (this.path == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- this.path.write(out);
- }
-
- }
-
-
- @Override
- public void checkConfiguration(final AbstractInvokable invokable) throws IllegalConfigurationException {
-
- // Check if the user has specified a path
- if (this.path == null) {
- throw new IllegalConfigurationException(this.getName() + " does not specify an input path");
- }
-
- // Check if the path is valid
- try {
- final FileSystem fs = this.path.getFileSystem();
- final FileStatus f = fs.getFileStatus(this.path);
- if (f == null) {
- throw new IOException(this.path.toString() + " led to a null object");
- }
- } catch (IOException e) {
- throw new IllegalConfigurationException("Cannot access file or directory: "
- + StringUtils.stringifyException(e));
- }
-
- // register the path in the configuration
- invokable.getTaskConfiguration()
- .setString(AbstractFileInputTask.INPUT_PATH_CONFIG_KEY, this.path.toString());
-
- // Finally, see if the task itself has a valid configuration
- super.checkConfiguration(invokable);
- }
-
-
- @Override
- public int getMaximumNumberOfSubtasks(final AbstractInvokable invokable) {
-
- int numberOfBlocks = -1;
-
- if (this.path == null) {
- return -1;
- }
-
- try {
- final FileSystem fs = this.path.getFileSystem();
- final FileStatus f = fs.getFileStatus(this.path);
- numberOfBlocks = fs.getNumberOfBlocks(f);
-
- } catch (IOException e) {
- return -1;
- }
-
- return (int) Math.min(numberOfBlocks, invokable.getMaximumNumberOfSubtasks());
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileOutputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileOutputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileOutputVertex.java
deleted file mode 100644
index 645041a..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobFileOutputVertex.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobgraph;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import eu.stratosphere.configuration.IllegalConfigurationException;
-import eu.stratosphere.core.fs.FileStatus;
-import eu.stratosphere.core.fs.FileSystem;
-import eu.stratosphere.core.fs.Path;
-import eu.stratosphere.nephele.template.AbstractFileOutputTask;
-import eu.stratosphere.nephele.template.AbstractInvokable;
-
-/**
- * A JobFileOutputVertex is a specific subtype of a {@link AbstractJobOutputVertex} and is designed
- * for Nephele tasks which write data to a local or distributed file system. As every job output vertex
- * A JobFileOutputVertex must not have any further output.
- *
- */
-public class JobFileOutputVertex extends AbstractJobOutputVertex {
-
- /**
- * The path pointing to the output file/directory.
- */
- private Path path = null;
-
- /**
- * Creates a new job file output vertex with the specified name.
- *
- * @param name
- * the name of the new job file output vertex
- * @param id
- * the ID of this vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobFileOutputVertex(final String name, final JobVertexID id, final JobGraph jobGraph) {
- super(name, id, jobGraph);
- }
-
- /**
- * Creates a new job file output vertex with the specified name.
- *
- * @param name
- * the name of the new job file output vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobFileOutputVertex(final String name, final JobGraph jobGraph) {
- super(name, null, jobGraph);
- }
-
- /**
- * Creates a new job file input vertex.
- *
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobFileOutputVertex(final JobGraph jobGraph) {
- super(null, null, jobGraph);
- }
-
- /**
- * Sets the path of the file the job file input vertex's task should write to.
- *
- * @param path
- * the path of the file the job file input vertex's task should write to
- */
- public void setFilePath(final Path path) {
- this.path = path;
- }
-
- /**
- * Returns the path of the file the job file output vertex's task should write to.
- *
- * @return the path of the file the job file output vertex's task should write to or <code>null</code> if no path
- * has yet been set
- */
-
- public Path getFilePath() {
- return this.path;
- }
-
- /**
- * Sets the class of the vertex's output task.
- *
- * @param outputClass
- * the class of the vertex's output task.
- */
- public void setFileOutputClass(final Class<? extends AbstractFileOutputTask> outputClass) {
- this.invokableClass = outputClass;
- }
-
- /**
- * Returns the class of the vertex's output task.
- *
- * @return the class of the vertex's output task or <code>null</code> if no task has yet been set
- */
- @SuppressWarnings("unchecked")
- public Class<? extends AbstractFileOutputTask> getFileOutputClass() {
- return (Class<? extends AbstractFileOutputTask>) this.invokableClass;
- }
-
-
- @Override
- public void read(final DataInput in) throws IOException {
- super.read(in);
-
- // Read path of the input file
- boolean isNotNull = in.readBoolean();
- if (isNotNull) {
- this.path = new Path();
- this.path.read(in);
- }
- }
-
-
- @Override
- public void write(final DataOutput out) throws IOException {
- super.write(out);
-
- // Write out the path of the input file
- if (this.path == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- this.path.write(out);
- }
- }
-
-
- @Override
- public void checkConfiguration(final AbstractInvokable invokable) throws IllegalConfigurationException {
-
- // Check if the user has specified a path
- if (this.path == null) {
- throw new IllegalConfigurationException(this.getName() + " does not specify an output path");
- }
-
- super.checkConfiguration(invokable);
- }
-
-
- @Override
- public int getMaximumNumberOfSubtasks(final AbstractInvokable invokable) {
-
- if (this.path == null) {
- return 0;
- }
-
- // Check if the path is valid
- try {
- final FileSystem fs = path.getFileSystem();
-
- try {
- final FileStatus f = fs.getFileStatus(path);
-
- if (f == null) {
- return 1;
- }
-
- // If the path points to a directory we allow an infinity number of subtasks
- if (f.isDir()) {
- return -1;
- }
- } catch (FileNotFoundException fnfex) {
- // The exception is thrown if the requested file/directory does not exist.
- // if the degree of parallelism is > 1, we create a directory for this path
- if (getNumberOfSubtasks() > 1) {
- fs.mkdirs(path);
- return -1;
- } else {
- // a none existing file and a degree of parallelism that is one
- return 1;
- }
- }
- } catch (IOException e) {
- // any other kind of I/O exception: we assume only a degree of one here
- return 1;
- }
-
- return 1;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericInputVertex.java
deleted file mode 100644
index 658ea0d..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericInputVertex.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobgraph;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import eu.stratosphere.configuration.IllegalConfigurationException;
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.template.AbstractInputTask;
-import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.util.StringUtils;
-
-public class JobGenericInputVertex extends JobInputVertex
-{
- /**
- * Class of input task.
- */
- protected Class<? extends AbstractInputTask<?>> inputClass = null;
-
- /**
- * Creates a new job input vertex with the specified name.
- *
- * @param name The name of the new job file input vertex.
- * @param id The ID of this vertex.
- * @param jobGraph The job graph this vertex belongs to.
- */
- public JobGenericInputVertex(String name, JobVertexID id, JobGraph jobGraph) {
- super(name, id, jobGraph);
- }
-
- /**
- * Creates a new job file input vertex with the specified name.
- *
- * @param name The name of the new job file input vertex.
- * @param jobGraph The job graph this vertex belongs to.
- */
- public JobGenericInputVertex(String name, JobGraph jobGraph) {
- super(name, null, jobGraph);
- }
-
- /**
- * Creates a new job file input vertex.
- *
- * @param jobGraph The job graph this vertex belongs to.
- */
- public JobGenericInputVertex(JobGraph jobGraph) {
- super(null, null, jobGraph);
- }
-
- /**
- * Sets the class of the vertex's input task.
- *
- * @param inputClass The class of the vertex's input task.
- */
- public void setInputClass(Class<? extends AbstractInputTask<?>> inputClass) {
- this.inputClass = inputClass;
- }
-
- /**
- * Returns the class of the vertex's input task.
- *
- * @return the class of the vertex's input task or <code>null</code> if no task has yet been set
- */
- public Class<? extends AbstractInputTask<?>> getInputClass() {
- return this.inputClass;
- }
-
-
- @SuppressWarnings("unchecked")
- @Override
- public void read(DataInput in) throws IOException
- {
- super.read(in);
-
- // Read class
- boolean isNotNull = in.readBoolean();
- if (isNotNull) {
- // Read the name of the class and try to instantiate the class object
- final ClassLoader cl = LibraryCacheManager.getClassLoader(this.getJobGraph().getJobID());
- if (cl == null) {
- throw new IOException("Cannot find class loader for vertex " + getID());
- }
-
- // Read the name of the expected class
- final String className = StringRecord.readString(in);
-
- try {
- this.inputClass = (Class<? extends AbstractInputTask<?>>) Class.forName(className, true, cl).asSubclass(AbstractInputTask.class);
- }
- catch (ClassNotFoundException cnfe) {
- throw new IOException("Class " + className + " not found in one of the supplied jar files: "
- + StringUtils.stringifyException(cnfe));
- }
- catch (ClassCastException ccex) {
- throw new IOException("Class " + className + " is not a subclass of "
- + AbstractInputTask.class.getName() + ": " + StringUtils.stringifyException(ccex));
- }
- }
- }
-
-
- @Override
- public void write(DataOutput out) throws IOException
- {
- super.write(out);
-
- // Write out the name of the class
- if (this.inputClass == null) {
- out.writeBoolean(false);
- } else {
- out.writeBoolean(true);
- StringRecord.writeString(out, this.inputClass.getName());
- }
- }
-
-
- @Override
- public void checkConfiguration(AbstractInvokable invokable) throws IllegalConfigurationException
- {
- // see if the task itself has a valid configuration
- // because this is user code running on the master, we embed it in a catch-all block
- try {
- invokable.checkConfiguration();
- }
- catch (IllegalConfigurationException icex) {
- throw icex; // simply forward
- }
- catch (Throwable t) {
- throw new IllegalConfigurationException("Checking the invokable's configuration caused an error: "
- + StringUtils.stringifyException(t));
- }
- }
-
-
- @Override
- public Class<? extends AbstractInvokable> getInvokableClass() {
-
- return this.inputClass;
- }
-
-
- @Override
- public int getMaximumNumberOfSubtasks(AbstractInvokable invokable)
- {
- return invokable.getMaximumNumberOfSubtasks();
- }
-
-
- @Override
- public int getMinimumNumberOfSubtasks(AbstractInvokable invokable) {
-
- return invokable.getMinimumNumberOfSubtasks();
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericOutputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericOutputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericOutputVertex.java
deleted file mode 100644
index a5b0665..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGenericOutputVertex.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobgraph;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import eu.stratosphere.configuration.IllegalConfigurationException;
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
-import eu.stratosphere.util.StringUtils;
-
-/**
- * A JobGenericOutputVertex is a specific subtype of a {@link JobOutputVertex} and is designed
- * for Nephele tasks which sink data in a not further specified way. As every job output vertex,
- * a JobGenericOutputVertex must not have any further output.
- *
- */
-public class JobGenericOutputVertex extends JobOutputVertex {
-
- /**
- * The class of the output task.
- */
- protected Class<? extends AbstractOutputTask> outputClass = null;
-
-
- /**
- * Creates a new job file output vertex with the specified name.
- *
- * @param name
- * the name of the new job file output vertex
- * @param id
- * the ID of this vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobGenericOutputVertex(String name, JobVertexID id, JobGraph jobGraph) {
- super(name, id, jobGraph);
- }
-
- /**
- * Creates a new job file output vertex with the specified name.
- *
- * @param name
- * the name of the new job file output vertex
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobGenericOutputVertex(String name, JobGraph jobGraph) {
- super(name, null, jobGraph);
- }
-
- /**
- * Creates a new job file input vertex.
- *
- * @param jobGraph
- * the job graph this vertex belongs to
- */
- public JobGenericOutputVertex(JobGraph jobGraph) {
- super(null, null, jobGraph);
- }
-
- /**
- * Sets the class of the vertex's output task.
- *
- * @param outputClass The class of the vertex's output task.
- */
- public void setOutputClass(Class<? extends AbstractOutputTask> outputClass) {
- this.outputClass = outputClass;
- }
-
- /**
- * Returns the class of the vertex's output task.
- *
- * @return The class of the vertex's output task or <code>null</code> if no task has yet been set.
- */
- public Class<? extends AbstractOutputTask> getOutputClass() {
- return this.outputClass;
- }
-
-
- @Override
- public void read(DataInput in) throws IOException {
- super.read(in);
-
- // Read class
- boolean isNotNull = in.readBoolean();
- if (isNotNull) {
-
- // Read the name of the class and try to instantiate the class object
- final ClassLoader cl = LibraryCacheManager.getClassLoader(this.getJobGraph().getJobID());
- if (cl == null) {
- throw new IOException("Cannot find class loader for vertex " + getID());
- }
-
- // Read the name of the expected class
- final String className = StringRecord.readString(in);
-
- try {
- this.outputClass = Class.forName(className, true, cl).asSubclass(AbstractOutputTask.class);
- }
- catch (ClassNotFoundException cnfe) {
- throw new IOException("Class " + className + " not found in one of the supplied jar files: "
- + StringUtils.stringifyException(cnfe));
- }
- catch (ClassCastException ccex) {
- throw new IOException("Class " + className + " is not a subclass of "
- + AbstractOutputTask.class.getName() + ": " + StringUtils.stringifyException(ccex));
- }
- }
- }
-
-
- @Override
- public void write(DataOutput out) throws IOException {
- super.write(out);
-
- // Write out the name of the class
- if (this.outputClass == null) {
- out.writeBoolean(false);
- }
- else {
- out.writeBoolean(true);
- StringRecord.writeString(out, this.outputClass.getName());
- }
- }
-
-
- @Override
- public void checkConfiguration(AbstractInvokable invokable) throws IllegalConfigurationException
- {
- // see if the task itself has a valid configuration
- // because this is user code running on the master, we embed it in a catch-all block
- try {
- invokable.checkConfiguration();
- }
- catch (IllegalConfigurationException icex) {
- throw icex; // simply forward
- }
- catch (Throwable t) {
- throw new IllegalConfigurationException("Checking the invokable's configuration caused an error: "
- + StringUtils.stringifyException(t));
- }
- }
-
-
- @Override
- public Class<? extends AbstractInvokable> getInvokableClass() {
-
- return this.outputClass;
- }
-
-
- @Override
- public int getMaximumNumberOfSubtasks(AbstractInvokable invokable)
- {
- // Delegate call to invokable
- return invokable.getMaximumNumberOfSubtasks();
- }
-
-
- @Override
- public int getMinimumNumberOfSubtasks(AbstractInvokable invokable)
- {
- // Delegate call to invokable
- return invokable.getMinimumNumberOfSubtasks();
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
index a22d7ca..9e5f6c7 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobInputVertex.java
@@ -13,9 +13,21 @@
package eu.stratosphere.nephele.jobgraph;
+import eu.stratosphere.api.common.io.InputFormat;
+import eu.stratosphere.api.common.operators.util.UserCodeObjectWrapper;
+import eu.stratosphere.api.common.operators.util.UserCodeWrapper;
+import eu.stratosphere.api.common.typeutils.TypeSerializerFactory;
+import eu.stratosphere.configuration.Configuration;
+import eu.stratosphere.core.io.InputSplit;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
import eu.stratosphere.nephele.template.AbstractInputTask;
+import eu.stratosphere.pact.runtime.task.util.TaskConfig;
+
+import java.io.DataInput;
+import java.io.IOException;
public class JobInputVertex extends AbstractJobInputVertex {
+ private volatile InputFormat<?, ? extends InputSplit> inputFormat = null;
/**
* Creates a new job input vertex with the specified name.
@@ -55,7 +67,7 @@ public class JobInputVertex extends AbstractJobInputVertex {
/**
* Sets the class of the vertex's input task.
- *
+ *
* @param inputClass
* The class of the vertex's input task.
*/
@@ -72,4 +84,80 @@ public class JobInputVertex extends AbstractJobInputVertex {
public Class<? extends AbstractInputTask<?>> getInputClass() {
return (Class<? extends AbstractInputTask<?>>) this.invokableClass;
}
+
+ public void setInputFormat(UserCodeWrapper<? extends InputFormat<?, ? extends InputSplit>> inputFormatWrapper) {
+ TaskConfig config = new TaskConfig(this.getConfiguration());
+ config.setStubWrapper(inputFormatWrapper);
+
+ inputFormat = inputFormatWrapper.getUserCodeObject();
+ }
+
+ public void setInputFormat(InputFormat<?, ? extends InputSplit> inputFormat) {
+ this.inputFormat = inputFormat;
+
+ UserCodeWrapper<? extends InputFormat<?, ? extends InputSplit>> wrapper = new
+ UserCodeObjectWrapper<InputFormat<?, ? extends InputSplit>>(inputFormat);
+ TaskConfig config = new TaskConfig(this.getConfiguration());
+ config.setStubWrapper(wrapper);
+ }
+
+ public void setInputFormatParameters(Configuration inputFormatParameters){
+ TaskConfig config = new TaskConfig(this.getConfiguration());
+ config.setStubParameters(inputFormatParameters);
+
+ if(inputFormat == null){
+ throw new RuntimeException("There is no input format set in job vertex: " + this.getID());
+ }
+
+ inputFormat.configure(inputFormatParameters);
+ }
+
+ public void setOutputSerializer(TypeSerializerFactory<?> factory){
+ TaskConfig config = new TaskConfig(this.getConfiguration());
+ config.setOutputSerializer(factory);
+ }
+
+
+ @Override
+ public void read(final DataInput input) throws IOException{
+ super.read(input);
+
+ // load input format wrapper from the config
+ ClassLoader cl = null;
+
+ try{
+ cl = LibraryCacheManager.getClassLoader(this.getJobGraph().getJobID());
+ }
+ catch (IOException ioe) {
+ throw new RuntimeException("Usercode ClassLoader could not be obtained for job: " +
+ this.getJobGraph().getJobID(), ioe);
+ }
+
+ final Configuration config = this.getConfiguration();
+ config.setClassLoader(cl);
+ final TaskConfig taskConfig = new TaskConfig(config);
+
+ inputFormat = taskConfig.<InputFormat<?, InputSplit>>getStubWrapper(cl).getUserCodeObject(InputFormat.class,
+ cl);
+
+ inputFormat.configure(taskConfig.getStubParameters());
+ }
+
+ @Override
+ public Class<? extends InputSplit> getInputSplitType() {
+ if(inputFormat == null){
+ throw new RuntimeException("No input format has been set for job vertex: "+ this.getID());
+ }
+
+ return inputFormat.getInputSplitType();
+ }
+
+ @Override
+ public InputSplit[] getInputSplits(int minNumSplits) throws IOException {
+ if(inputFormat == null){
+ throw new RuntimeException("No input format has been set for job vertex: "+ this.getID());
+ }
+
+ return inputFormat.createInputSplits(minNumSplits);
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
index 31452c3..154e639 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobOutputVertex.java
@@ -13,7 +13,16 @@
package eu.stratosphere.nephele.jobgraph;
+import eu.stratosphere.api.common.io.OutputFormat;
+import eu.stratosphere.api.common.operators.util.UserCodeObjectWrapper;
+import eu.stratosphere.api.common.operators.util.UserCodeWrapper;
+import eu.stratosphere.configuration.Configuration;
+import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
import eu.stratosphere.nephele.template.AbstractOutputTask;
+import eu.stratosphere.pact.runtime.task.util.TaskConfig;
+
+import java.io.DataInput;
+import java.io.IOException;
/**
* A JobOutputVertex is a specific subtype of a {@link AbstractJobOutputVertex} and is designed
@@ -22,6 +31,7 @@ import eu.stratosphere.nephele.template.AbstractOutputTask;
*
*/
public class JobOutputVertex extends AbstractJobOutputVertex {
+ private volatile OutputFormat<?> outputFormat = null;
/**
* Creates a new job file output vertex with the specified name.
@@ -78,4 +88,50 @@ public class JobOutputVertex extends AbstractJobOutputVertex {
public Class<? extends AbstractOutputTask> getOutputClass() {
return (Class<? extends AbstractOutputTask>) this.invokableClass;
}
+
+ public void setOutputFormat(UserCodeWrapper<? extends OutputFormat<?>> outputFormatWrapper){
+ TaskConfig config = new TaskConfig(this.getConfiguration());
+ config.setStubWrapper(outputFormatWrapper);
+ outputFormat = outputFormatWrapper.getUserCodeObject();
+ }
+
+ public void setOutputFormat(OutputFormat<?> outputFormat){
+ this.outputFormat = outputFormat;
+ UserCodeWrapper<? extends OutputFormat<?>> wrapper = new UserCodeObjectWrapper<OutputFormat<?>>
+ (outputFormat);
+ TaskConfig config = new TaskConfig(this.getConfiguration());
+ config.setStubWrapper(wrapper);
+ }
+
+ public void setOutputFormatParameters(Configuration parameters){
+ TaskConfig config = new TaskConfig(this.getConfiguration());
+ config.setStubParameters(parameters);
+
+ outputFormat.configure(parameters);
+ }
+
+ @Override
+ public void read(final DataInput input) throws IOException{
+ super.read(input);
+
+ ClassLoader cl = null;
+ try{
+ cl = LibraryCacheManager.getClassLoader(this.getJobGraph().getJobID());
+ }
+ catch (IOException ioe) {
+ throw new RuntimeException("Usercode ClassLoader could not be obtained for job: " +
+ this.getJobGraph().getJobID(), ioe);
+ }
+
+ final Configuration config = this.getConfiguration();
+ config.setClassLoader(cl);
+ final TaskConfig taskConfig = new TaskConfig(config);
+
+ if(taskConfig.hasStubWrapper()){
+ outputFormat = taskConfig.<OutputFormat<?> >getStubWrapper(cl).getUserCodeObject(OutputFormat.class,cl);
+ outputFormat.configure(taskConfig.getStubParameters());
+ }
+ }
+
+ public OutputFormat<?> getOutputFormat() { return outputFormat; }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
index 61eb66c..8672aeb 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobTaskVertex.java
@@ -13,7 +13,6 @@
package eu.stratosphere.nephele.jobgraph;
-import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.nephele.template.AbstractTask;
/**
@@ -84,20 +83,4 @@ public class JobTaskVertex extends AbstractJobVertex {
public Class<? extends AbstractTask> getTaskClass() {
return (Class<? extends AbstractTask>) this.invokableClass;
}
-
-
- @Override
- public int getMaximumNumberOfSubtasks(final AbstractInvokable invokable) {
-
- // Delegate call to invokable
- return invokable.getMaximumNumberOfSubtasks();
- }
-
-
- @Override
- public int getMinimumNumberOfSubtasks(final AbstractInvokable invokable) {
-
- // Delegate call to invokable
- return invokable.getMinimumNumberOfSubtasks();
- }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
index bbef991..790aca9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/InputSplitManager.java
@@ -102,18 +102,7 @@ public final class InputSplitManager {
continue;
}
- final AbstractInvokable invokable = groupVertex.getEnvironment().getInvokable();
- if (!(invokable instanceof AbstractInputTask)) {
- LOG.error(groupVertex.getName() + " has " + inputSplits.length
- + " input splits, but is not of typt AbstractInputTask, ignoring...");
- continue;
- }
-
- @SuppressWarnings("unchecked")
- final AbstractInputTask<? extends InputSplit> inputTask = (AbstractInputTask<? extends InputSplit>) invokable;
- final Class<? extends InputSplit> splitType = inputTask.getInputSplitType();
-
- final InputSplitAssigner assigner = getAssignerByType(splitType, true);
+ final InputSplitAssigner assigner = getAssignerByType(groupVertex.getInputSplitType(), true);
// Add entry to cache for fast retrieval during the job execution
this.assignerCache.put(groupVertex, assigner);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
index 3717fbf..1e6929d 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/LocatableInputSplitAssigner.java
@@ -49,18 +49,8 @@ public final class LocatableInputSplitAssigner implements InputSplitAssigner {
@Override
public void registerGroupVertex(final ExecutionGroupVertex groupVertex) {
- // Do some sanity checks first
- final AbstractInvokable invokable = groupVertex.getEnvironment().getInvokable();
-
- // if (!(invokable instanceof AbstractFileInputTask)) {
- // LOG.error(groupVertex.getName() + " is not an input vertex, ignoring vertex...");
- // return;
- // }
-
- @SuppressWarnings("unchecked")
- final AbstractInputTask<? extends InputSplit> inputTask = (AbstractInputTask<? extends InputSplit>) invokable;
- if (!LocatableInputSplit.class.isAssignableFrom(inputTask.getInputSplitType())) {
- LOG.error(groupVertex.getName() + " produces input splits of type " + inputTask.getInputSplitType()
+ if (!LocatableInputSplit.class.isAssignableFrom(groupVertex.getInputSplitType())) {
+ LOG.error(groupVertex.getName() + " produces input splits of type " + groupVertex.getInputSplitType()
+ " and cannot be handled by this split assigner");
return;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
index 7894334..048562c 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobmanager/splitassigner/file/FileInputSplitAssigner.java
@@ -50,18 +50,8 @@ public final class FileInputSplitAssigner implements InputSplitAssigner {
@Override
public void registerGroupVertex(final ExecutionGroupVertex groupVertex) {
- // Do some sanity checks first
- final AbstractInvokable invokable = groupVertex.getEnvironment().getInvokable();
-
- // if (!(invokable instanceof AbstractFileInputTask)) {
- // LOG.error(groupVertex.getName() + " is not an input vertex, ignoring vertex...");
- // return;
- // }
-
- @SuppressWarnings("unchecked")
- final AbstractInputTask<? extends InputSplit> inputTask = (AbstractInputTask<? extends InputSplit>) invokable;
- if (!FileInputSplit.class.equals(inputTask.getInputSplitType())) {
- LOG.error(groupVertex.getName() + " produces input splits of type " + inputTask.getInputSplitType()
+ if (!FileInputSplit.class.equals(groupVertex.getInputSplitType())) {
+ LOG.error(groupVertex.getName() + " produces input splits of type " + groupVertex.getInputSplitType()
+ " and cannot be handled by this split assigner");
return;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileInputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileInputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileInputTask.java
deleted file mode 100644
index d16e757..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileInputTask.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.template;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
-import eu.stratosphere.core.fs.BlockLocation;
-import eu.stratosphere.core.fs.FileInputSplit;
-import eu.stratosphere.core.fs.FileStatus;
-import eu.stratosphere.core.fs.FileSystem;
-import eu.stratosphere.core.fs.Path;
-
-/**
- * Specialized subtype of {@link AbstractInputTask} for tasks which are supposed to generate input from
- * a file. In addition to {@link AbstractInputTask} this class includes a method to query file splits
- * which should be read during the task's execution.
- *
- */
-public abstract class AbstractFileInputTask extends AbstractInputTask<FileInputSplit> {
-
- public static final String INPUT_PATH_CONFIG_KEY = "input.path";
-
- /**
- * The fraction that the last split may be larger than the others.
- */
- private static final float MAX_SPLIT_SIZE_DISCREPANCY = 1.1f;
-
- // --------------------------------------------------------------------------------------------
-
- /**
- * Returns an iterator to a (possible empty) list of file input splits which is expected to be consumed by this
- * instance of the {@link AbstractFileInputTask}.
- *
- * @return an iterator to a (possible empty) list of file input splits.
- */
- public Iterator<FileInputSplit> getFileInputSplits() {
-
- return new InputSplitIterator<FileInputSplit>(getEnvironment().getInputSplitProvider());
- }
-
-
- @Override
- public FileInputSplit[] computeInputSplits(final int minNumSplits) throws IOException {
-
- final String pathURI = getTaskConfiguration().getString(INPUT_PATH_CONFIG_KEY, null);
- if (pathURI == null) {
- throw new IOException("The path to the file was not found in the runtime configuration.");
- }
-
- final Path path;
- try {
- path = new Path(pathURI);
- } catch (Exception iaex) {
- throw new IOException("Invalid file path specifier: ", iaex);
- }
-
- final List<FileInputSplit> inputSplits = new ArrayList<FileInputSplit>();
-
- // get all the files that are involved in the splits
- final List<FileStatus> files = new ArrayList<FileStatus>();
- long totalLength = 0;
-
- final FileSystem fs = path.getFileSystem();
- final FileStatus pathFile = fs.getFileStatus(path);
-
- if (pathFile.isDir()) {
- // input is directory. list all contained files
- final FileStatus[] dir = fs.listStatus(path);
- for (int i = 0; i < dir.length; i++) {
- if (!dir[i].isDir()) {
- files.add(dir[i]);
- totalLength += dir[i].getLen();
- }
- }
-
- } else {
- files.add(pathFile);
- totalLength += pathFile.getLen();
- }
-
- final long minSplitSize = 1;
- final long maxSplitSize = (minNumSplits < 1) ? Long.MAX_VALUE : (totalLength / minNumSplits +
- (totalLength % minNumSplits == 0 ? 0 : 1));
-
- // now that we have the files, generate the splits
- int splitNum = 0;
- for (final FileStatus file : files) {
-
- final long len = file.getLen();
- final long blockSize = file.getBlockSize();
-
- final long splitSize = Math.max(minSplitSize, Math.min(maxSplitSize, blockSize));
- final long halfSplit = splitSize >>> 1;
-
- final long maxBytesForLastSplit = (long) (splitSize * MAX_SPLIT_SIZE_DISCREPANCY);
-
- if (len > 0) {
-
- // get the block locations and make sure they are in order with respect to their offset
- final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, len);
- Arrays.sort(blocks);
-
- long bytesUnassigned = len;
- long position = 0;
-
- int blockIndex = 0;
-
- while (bytesUnassigned > maxBytesForLastSplit) {
- // get the block containing the majority of the data
- blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
- // create a new split
- final FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), position, splitSize,
- blocks[blockIndex]
- .getHosts());
- inputSplits.add(fis);
-
- // adjust the positions
- position += splitSize;
- bytesUnassigned -= splitSize;
- }
-
- // assign the last split
- if (bytesUnassigned > 0) {
- blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
- final FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), position,
- bytesUnassigned,
- blocks[blockIndex].getHosts());
- inputSplits.add(fis);
- }
- } else {
- // special case with a file of zero bytes size
- final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, 0);
- String[] hosts;
- if (blocks.length > 0) {
- hosts = blocks[0].getHosts();
- } else {
- hosts = new String[0];
- }
- final FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), 0, 0, hosts);
- inputSplits.add(fis);
- }
- }
-
- return inputSplits.toArray(new FileInputSplit[inputSplits.size()]);
- }
-
- /**
- * Retrieves the index of the <tt>BlockLocation</tt> that contains the part of the file described by the given
- * offset.
- *
- * @param blocks
- * The different blocks of the file. Must be ordered by their offset.
- * @param offset
- * The offset of the position in the file.
- * @param startIndex
- * The earliest index to look at.
- * @return The index of the block containing the given position.
- */
- private final int getBlockIndexForPosition(final BlockLocation[] blocks, final long offset,
- final long halfSplitSize, final int startIndex) {
-
- // go over all indexes after the startIndex
- for (int i = startIndex; i < blocks.length; i++) {
- long blockStart = blocks[i].getOffset();
- long blockEnd = blockStart + blocks[i].getLength();
-
- if (offset >= blockStart && offset < blockEnd) {
- // got the block where the split starts
- // check if the next block contains more than this one does
- if (i < blocks.length - 1 && blockEnd - offset < halfSplitSize) {
- return i + 1;
- } else {
- return i;
- }
- }
- }
- throw new IllegalArgumentException("The given offset is not contained in the any block.");
- }
-
-
- @Override
- public Class<FileInputSplit> getInputSplitType() {
-
- return FileInputSplit.class;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileOutputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileOutputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileOutputTask.java
deleted file mode 100644
index 5f231c1..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractFileOutputTask.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.template;
-
-import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.core.fs.Path;
-
-/**
- * Specialized subtype of {@link AbstractOutputTask} for tasks which are supposed to write output to
- * a file.
- *
- */
-public abstract class AbstractFileOutputTask extends AbstractOutputTask {
-
- /**
- * Returns the output path which has been assigned to the original {@link JobFileOutputVertex}.
- *
- * @return the output path which has been assigned to the original {@link JobFileOutputVertex} or <code>null</code>
- * if the path cannot be retrieved
- */
- public Path getFileOutputPath() {
-
- // TODO: This is a quick workaround, problem can be solved in a more generic way
- final Configuration conf = getEnvironment().getTaskConfiguration();
-
- final String outputPath = conf.getString("outputPath", null);
-
- if (outputPath != null) {
- return new Path(outputPath);
- }
-
- return null;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractGenericInputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractGenericInputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractGenericInputTask.java
deleted file mode 100644
index cf6d916..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractGenericInputTask.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.template;
-
-import eu.stratosphere.core.io.GenericInputSplit;
-
-/**
- * An input task that processes generic input splits (partitions).
- */
-public abstract class AbstractGenericInputTask extends AbstractInputTask<GenericInputSplit> {
-
-
- @Override
- public GenericInputSplit[] computeInputSplits(final int requestedMinNumber) throws Exception {
- GenericInputSplit[] splits = new GenericInputSplit[requestedMinNumber];
- for (int i = 0; i < requestedMinNumber; i++) {
- splits[i] = new GenericInputSplit(i,requestedMinNumber);
- }
- return splits;
- }
-
-
- @Override
- public Class<GenericInputSplit> getInputSplitType() {
-
- return GenericInputSplit.class;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java
index 76c9377..88e4fcb 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java
@@ -27,29 +27,6 @@ import eu.stratosphere.core.io.InputSplit;
public abstract class AbstractInputTask<T extends InputSplit> extends AbstractInvokable {
/**
- * This method computes the different splits of the input that can be processed in parallel. It needs
- * to be implemented by classes that describe input tasks.
- * <p>
- * Note that this method does not return the input splits for the task instance only, but it computes all splits for
- * all parallel instances. Those computed splits are then assigned to the individual task instances by the Job
- * Manager. To obtain the input splits for the current task instance, use the {@link #getTaskInputSplits()} method.
- *
- * @param requestedMinNumber
- * The minimum number of splits to create. This is a hint by the system how many splits
- * should be generated at least (typically because there are that many parallel task
- * instances), but it is no hard constraint
- * @return The input splits for the input to be processed by all instances of this input task
- */
- public abstract T[] computeInputSplits(int requestedMinNumber) throws Exception;
-
- /**
- * Returns the type of input splits that is generated by this input task.
- *
- * @return the type of input splits that is generated by this input task
- */
- public abstract Class<T> getInputSplitType();
-
- /**
* Returns an iterator to a (possible empty) list of input splits which is expected to be consumed by this
* instance of the {@link AbstractInputTask}.
*
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
index a37f592..79390f8 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
@@ -62,39 +62,6 @@ public abstract class AbstractInvokable {
return this.environment;
}
- /**
- * Overwrite this method to implement task specific checks if the
- * respective task has been configured properly.
- *
- * @throws IllegalConfigurationException
- * thrown if the respective tasks is not configured properly
- */
- public void checkConfiguration() throws IllegalConfigurationException {
- // The default implementation does nothing
- }
-
- /**
- * Overwrite this method to provide the minimum number of subtasks the respective task
- * must be split into at runtime.
- *
- * @return the minimum number of subtasks the respective task must be split into at runtime
- */
- public int getMinimumNumberOfSubtasks() {
- // The default implementation always returns 1
- return 1;
- }
-
- /**
- * Overwrite this method to provide the maximum number of subtasks the respective task
- * can be split into at runtime.
- *
- * @return the maximum number of subtasks the respective task can be split into at runtime, <code>-1</code> for
- * infinity
- */
- public int getMaximumNumberOfSubtasks() {
- // The default implementation always returns -1
- return -1;
- }
/**
* Returns the current number of subtasks the respective task is split into.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/GenericInputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/GenericInputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/GenericInputTask.java
deleted file mode 100644
index c2cbbc1..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/GenericInputTask.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.template;
-
-import eu.stratosphere.core.io.GenericInputSplit;
-
-/**
- * An input task that processes generic input splits (partitions).
- */
-public abstract class GenericInputTask extends AbstractInputTask<GenericInputSplit> {
-
-
- @Override
- public GenericInputSplit[] computeInputSplits(final int requestedMinNumber) throws Exception {
- GenericInputSplit[] splits = new GenericInputSplit[requestedMinNumber];
- for (int i = 0; i < requestedMinNumber; i++) {
- splits[i] = new GenericInputSplit(i, requestedMinNumber);
- }
- return splits;
- }
-
-
- @Override
- public Class<GenericInputSplit> getInputSplitType() {
-
- return GenericInputSplit.class;
- }
-}
[11/22] git commit: Rework the Taskmanager to a slot based model and
remove legacy cloud code
Posted by se...@apache.org.
Rework the Taskmanager to a slot based model and remove legacy cloud code
Squashed commit of the following:
- Post merge cleanup
- Renamed fractionMemory into memoryFraction.
- Removed the Local and Queue schedulers and replaced them with a unified DefaultScheduler.
- Removed the Local and Cluster instance managers and replaced them with a unified DefaultInstanceManager.
- Removed connection IDs from execution edges
- Removed InstanceType, InstanceRequestMap, InstanceTypeDescription, InstanceTypeDescriptionTypeFactory, PendingRequestsMap
- Fixed problems with test cases.
- Introduced a simple slot system for scheduling.
- Removed subtasks per instance
- Added registerTaskManager to the JobManager RPC calls. RegisterTaskManager is called only once where the hardware description information is sent.
Add: Merged the cloud-model removal with the new network stack
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/86d206c4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/86d206c4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/86d206c4
Branch: refs/heads/master
Commit: 86d206c41922a1b7b8c2839b65d3568f9be55e0c
Parents: 7b6b5a2
Author: Till Rohrmann <ti...@gmail.com>
Authored: Sun Jun 1 16:03:27 2014 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:10 2014 +0200
----------------------------------------------------------------------
.../api/avro/AvroExternalJarProgramITCase.java | 1 +
.../eu/stratosphere/client/LocalExecutor.java | 9 +
.../client/minicluster/NepheleMiniCluster.java | 23 +-
.../eu/stratosphere/client/program/Client.java | 5 +-
.../client/CliFrontendListCancelTest.java | 11 +-
.../stratosphere/client/testjar/WordCount.java | 3 +-
.../eu/stratosphere/compiler/PactCompiler.java | 473 +---------
.../compiler/costs/DefaultCostEstimator.java | 18 +-
.../dag/AbstractPartialSolutionNode.java | 5 -
.../compiler/dag/BinaryUnionNode.java | 48 +-
.../compiler/dag/BulkIterationNode.java | 20 +-
.../stratosphere/compiler/dag/DataSinkNode.java | 18 +-
.../compiler/dag/DataSourceNode.java | 16 -
.../compiler/dag/GroupReduceNode.java | 1 -
.../compiler/dag/OptimizerNode.java | 51 +-
.../stratosphere/compiler/dag/ReduceNode.java | 1 -
.../compiler/dag/SingleInputNode.java | 41 +-
.../stratosphere/compiler/dag/SinkJoiner.java | 1 -
.../stratosphere/compiler/dag/TwoInputNode.java | 56 +-
.../compiler/dag/WorksetIterationNode.java | 12 +-
.../RequestedGlobalProperties.java | 8 +-
.../AllGroupWithPartialPreGroupProperties.java | 3 +-
.../compiler/operators/AllReduceProperties.java | 3 +-
.../GroupReduceWithCombineProperties.java | 6 +-
.../operators/PartialGroupProperties.java | 6 +-
.../compiler/operators/ReduceProperties.java | 3 +-
.../eu/stratosphere/compiler/plan/Channel.java | 59 +-
.../eu/stratosphere/compiler/plan/PlanNode.java | 25 +-
.../plandump/PlanJSONDumpGenerator.java | 3 -
.../plantranslate/NepheleJobGraphGenerator.java | 80 +-
.../pact/compiler/CompilerTestBase.java | 24 +-
.../configuration/ConfigConstants.java | 12 +-
.../java/eu/stratosphere/util/ClassUtils.java | 1 +
.../event/job/VertexAssignmentEvent.java | 32 +-
.../nephele/executiongraph/ExecutionEdge.java | 9 -
.../nephele/executiongraph/ExecutionGraph.java | 181 ++--
.../executiongraph/ExecutionGroupVertex.java | 184 +---
.../nephele/executiongraph/ExecutionStage.java | 112 +--
.../nephele/executiongraph/ExecutionVertex.java | 1 -
.../executiongraph/InternalJobStatus.java | 1 +
.../executiongraph/ManagementGraphFactory.java | 7 +-
.../nephele/instance/AbstractInstance.java | 297 ------
.../nephele/instance/AllocatedResource.java | 38 +-
.../nephele/instance/AllocatedSlot.java | 65 ++
.../nephele/instance/AllocationID.java | 4 +-
.../instance/DefaultInstanceManager.java | 393 ++++++++
.../nephele/instance/DummyInstance.java | 14 +-
.../stratosphere/nephele/instance/Hardware.java | 24 +
.../stratosphere/nephele/instance/Instance.java | 362 +++++++
.../nephele/instance/InstanceManager.java | 145 +--
.../nephele/instance/InstanceNotifier.java | 71 ++
.../nephele/instance/InstanceRequestMap.java | 184 ----
.../nephele/instance/InstanceType.java | 199 ----
.../instance/InstanceTypeDescription.java | 137 ---
.../InstanceTypeDescriptionFactory.java | 46 -
.../nephele/instance/InstanceTypeFactory.java | 91 --
.../nephele/instance/LocalInstanceManager.java | 60 ++
.../instance/cluster/AllocatedSlice.java | 120 ---
.../instance/cluster/ClusterInstance.java | 181 ----
.../cluster/ClusterInstanceNotifier.java | 71 --
.../instance/cluster/ClusterManager.java | 945 -------------------
.../instance/cluster/PendingRequestsMap.java | 97 --
.../nephele/instance/local/LocalInstance.java | 37 -
.../instance/local/LocalInstanceManager.java | 418 --------
.../instance/local/LocalInstanceNotifier.java | 70 --
.../nephele/jobgraph/AbstractJobVertex.java | 100 +-
.../nephele/jobmanager/DeploymentManager.java | 8 +-
.../nephele/jobmanager/EventCollector.java | 10 +-
.../nephele/jobmanager/JobManager.java | 98 +-
.../nephele/jobmanager/JobManagerUtils.java | 54 +-
.../scheduler/AbstractExecutionListener.java | 166 ----
.../jobmanager/scheduler/AbstractScheduler.java | 662 -------------
.../scheduler/DefaultExecutionListener.java | 127 +++
.../jobmanager/scheduler/DefaultScheduler.java | 762 +++++++++++++++
.../jobmanager/scheduler/RecoveryLogic.java | 248 -----
.../scheduler/local/LocalExecutionListener.java | 33 -
.../scheduler/local/LocalScheduler.java | 213 -----
.../scheduler/queue/QueueExecutionListener.java | 40 -
.../scheduler/queue/QueueScheduler.java | 216 -----
.../splitassigner/InputSplitManager.java | 2 +-
.../LocatableInputSplitAssigner.java | 4 +-
.../splitassigner/LocatableInputSplitList.java | 20 +-
.../file/FileInputSplitAssigner.java | 4 +-
.../splitassigner/file/FileInputSplitList.java | 20 +-
.../managementgraph/ManagementGraph.java | 4 +-
.../managementgraph/ManagementVertex.java | 35 +-
.../eu/stratosphere/nephele/net/NetUtils.java | 2 +
.../profiling/impl/JobProfilingData.java | 6 +-
.../protocols/ExtendedManagementProtocol.java | 23 +-
.../nephele/protocols/JobManagerProtocol.java | 19 +-
.../services/iomanager/ChannelAccess.java | 1 +
.../services/memorymanager/MemoryManager.java | 17 +-
.../memorymanager/spi/DefaultMemoryManager.java | 39 +-
.../nephele/taskmanager/TaskManager.java | 123 ++-
.../RegisterTaskManagerResult.java | 50 +
.../nephele/topology/NetworkNode.java | 10 -
.../eu/stratosphere/nephele/util/IOUtils.java | 1 +
.../pact/runtime/cache/FileCache.java | 9 +-
.../hash/BuildFirstHashMatchIterator.java | 8 +-
.../BuildFirstReOpenableHashMatchIterator.java | 8 +-
.../hash/BuildSecondHashMatchIterator.java | 8 +-
.../pact/runtime/hash/InMemoryPartition.java | 2 +
.../iterative/task/IterationHeadPactTask.java | 5 +-
.../pact/runtime/shipping/ShipStrategyType.java | 23 +-
.../runtime/sort/AsynchronousPartialSorter.java | 11 +-
.../AsynchronousPartialSorterCollector.java | 7 +-
.../sort/CombiningUnilateralSortMerger.java | 18 +-
.../pact/runtime/sort/UnilateralSortMerger.java | 18 +-
.../AbstractCachedBuildSideMatchDriver.java | 2 +-
.../pact/runtime/task/CrossDriver.java | 3 +-
.../pact/runtime/task/DataSinkTask.java | 2 +-
.../runtime/task/GroupReduceCombineDriver.java | 4 +-
.../pact/runtime/task/MatchDriver.java | 38 +-
.../pact/runtime/task/ReduceCombineDriver.java | 3 +-
.../pact/runtime/task/RegularPactTask.java | 12 +-
.../SynchronousChainedCombineDriver.java | 2 +-
.../pact/runtime/task/util/TaskConfig.java | 68 +-
.../runtime/io/channels/InputChannel.java | 9 +-
.../runtime/io/gates/InputGate.java | 2 +
.../runtime/io/network/RemoteReceiver.java | 20 +-
.../nephele/event/job/ManagementEventTest.java | 4 +-
.../executiongraph/ExecutionGraphTest.java | 258 +----
.../instance/cluster/ClusterManagerTest.java | 273 ------
.../cluster/ClusterManagerTestUtils.java | 66 --
.../cluster/DefaultInstanceManagerTest.java | 232 +++++
.../DefaultInstanceManagerTestUtils.java | 66 ++
.../instance/cluster/HostInClusterTest.java | 130 ++-
.../cluster/PendingRequestsMapTest.java | 91 --
.../local/LocalInstanceManagerTest.java | 17 +-
.../nephele/jobmanager/JobManagerITCase.java | 16 +-
.../scheduler/queue/DefaultSchedulerTest.java | 185 ++++
.../scheduler/queue/QueueSchedulerTest.java | 186 ----
.../scheduler/queue/TestDeploymentManager.java | 4 +-
.../scheduler/queue/TestInstanceManager.java | 118 +--
.../managementgraph/ManagementGraphTest.java | 11 +-
.../services/iomanager/IOManagerITCase.java | 2 +-
.../IOManagerPerformanceBenchmark.java | 2 +-
.../services/iomanager/IOManagerTest.java | 2 +-
.../memorymanager/MemorySegmentTest.java | 2 +-
.../nephele/util/ServerTestUtils.java | 17 +-
.../runtime/hash/HashMatchIteratorITCase.java | 14 +-
.../pact/runtime/hash/HashTableITCase.java | 2 +-
.../runtime/hash/ReOpenableHashTableITCase.java | 4 +-
.../pact/runtime/io/ChannelViewsTest.java | 8 +-
.../pact/runtime/io/SpillingBufferTest.java | 2 +-
.../event/EventWithAggregatorsTest.java | 2 +
.../resettable/BlockResettableIteratorTest.java | 2 +-
...lockResettableMutableObjectIteratorTest.java | 2 +-
.../sort/AsynchonousPartialSorterITCase.java | 14 +-
.../CombiningUnilateralSortMergerITCase.java | 8 +-
.../pact/runtime/sort/ExternalSortITCase.java | 12 +-
.../sort/MassiveStringSortingITCase.java | 4 +-
.../sort/SortMergeMatchIteratorITCase.java | 2 +-
.../runtime/task/CombineTaskExternalITCase.java | 8 +-
.../pact/runtime/task/CombineTaskTest.java | 10 +-
.../runtime/task/CrossTaskExternalITCase.java | 7 +-
.../pact/runtime/task/CrossTaskTest.java | 36 +-
.../pact/runtime/task/DataSinkTaskTest.java | 47 +-
.../runtime/task/MatchTaskExternalITCase.java | 14 +-
.../pact/runtime/task/MatchTaskTest.java | 56 +-
.../runtime/task/ReduceTaskExternalITCase.java | 8 +-
.../pact/runtime/task/ReduceTaskTest.java | 3 +-
.../runtime/task/chaining/ChainTaskTest.java | 19 +-
.../task/drivers/ReduceCombineDriverTest.java | 10 +-
.../runtime/task/drivers/TestTaskContext.java | 2 +-
.../pact/runtime/test/util/DriverTestBase.java | 8 +-
.../pact/runtime/test/util/MockEnvironment.java | 9 +-
.../netty/InboundEnvelopeDecoderTest.java | 2 +-
.../test/compiler/util/CompilerTestBase.java | 26 +-
.../test/util/AbstractTestBase.java | 48 +-
.../test/util/JavaProgramTestBase.java | 2 +
.../test/util/RecordAPITestBase.java | 3 +
.../test/accumulators/AccumulatorITCase.java | 7 +-
.../BroadcastVarsNepheleITCase.java | 16 +-
.../KMeansIterativeNepheleITCase.java | 30 +-
.../test/cancelling/CancellingTestBase.java | 10 +-
.../test/cancelling/MapCancelingITCase.java | 13 +-
.../cancelling/MatchJoinCancelingITCase.java | 17 +-
.../clients/examples/LocalExecutorITCase.java | 10 +-
.../exampleJavaPrograms/WordCountITCase.java | 4 +-
.../ComputeEdgeDegreesITCase.java | 2 +-
.../ConnectedComponentsITCase.java | 2 +-
.../EnumTrianglesOnEdgesWithDegreesITCase.java | 2 +-
.../TransitiveClosureNaiveITCase.java | 2 +-
.../WebLogAnalysisITCase.java | 2 +-
.../exampleScalaPrograms/WordCountITCase.java | 2 +-
.../WordCountPactValueITCase.java | 2 +-
.../WordCountWithCountFunctionITCase.java | 2 +-
.../test/failingPrograms/TaskFailureITCase.java | 8 +-
.../CoGroupConnectedComponentsITCase.java | 6 +-
.../iterative/ConnectedComponentsITCase.java | 6 +-
...ectedComponentsWithDeferredUpdateITCase.java | 3 +-
...tedComponentsWithSolutionSetFirstITCase.java | 7 +-
.../test/iterative/DanglingPageRankITCase.java | 3 +-
.../test/iterative/DeltaPageRankITCase.java | 3 +-
.../DependencyConnectedComponentsITCase.java | 5 +-
...IterationTerminationWithTerminationTail.java | 6 +-
.../IterationTerminationWithTwoTails.java | 6 +-
.../IterationWithAllReducerITCase.java | 6 +-
.../iterative/IterationWithChainingITCase.java | 3 +-
.../iterative/IterationWithUnionITCase.java | 3 +-
.../test/iterative/IterativeKMeansITCase.java | 6 +-
.../test/iterative/KMeansITCase.java | 8 +-
.../test/iterative/LineRankITCase.java | 5 +-
.../test/iterative/PageRankITCase.java | 3 +-
.../ConnectedComponentsNepheleITCase.java | 54 +-
.../nephele/DanglingPageRankNepheleITCase.java | 7 +-
...nglingPageRankWithCombinerNepheleITCase.java | 7 +-
.../IterationWithChainingNepheleITCase.java | 17 +-
.../test/iterative/nephele/JobGraphUtils.java | 20 +-
.../CustomCompensatableDanglingPageRank.java | 57 +-
...mpensatableDanglingPageRankWithCombiner.java | 59 +-
.../CompensatableDanglingPageRank.java | 55 +-
.../PackagedProgramEndToEndITCase.java | 15 +-
.../test/operators/UnionSinkITCase.java | 3 +-
.../recordJobTests/CollectionSourceTest.java | 8 +-
.../ComputeEdgeDegreesITCase.java | 3 +-
.../EnumTrianglesOnEdgesWithDegreesITCase.java | 3 +-
.../recordJobTests/EnumTrianglesRDFITCase.java | 4 +-
.../recordJobTests/GlobalSortingITCase.java | 5 +-
.../GlobalSortingMixedOrderITCase.java | 62 +-
.../recordJobTests/GroupOrderReduceITCase.java | 3 +-
.../recordJobTests/MergeOnlyJoinITCase.java | 1 +
.../test/recordJobTests/PairwiseSPITCase.java | 4 +-
.../test/recordJobTests/TPCHQuery10ITCase.java | 2 +-
.../test/recordJobTests/TPCHQuery3ITCase.java | 3 +-
.../TPCHQuery3WithUnionITCase.java | 6 +-
.../test/recordJobTests/TPCHQuery4ITCase.java | 6 +-
.../test/recordJobTests/TPCHQuery9ITCase.java | 6 +-
.../recordJobTests/TPCHQueryAsterixITCase.java | 6 +-
.../test/recordJobTests/TeraSortITCase.java | 7 +-
.../recordJobTests/WebLogAnalysisITCase.java | 6 +-
.../test/recordJobTests/WordCountITCase.java | 6 +-
.../WordCountUnionReduceITCase.java | 6 +-
.../test/runtime/NetworkStackThroughput.java | 49 +-
235 files changed, 3917 insertions(+), 7900 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-addons/avro/src/test/java/eu/stratosphere/api/avro/AvroExternalJarProgramITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-addons/avro/src/test/java/eu/stratosphere/api/avro/AvroExternalJarProgramITCase.java b/stratosphere-addons/avro/src/test/java/eu/stratosphere/api/avro/AvroExternalJarProgramITCase.java
index a766fcb..e398acf 100644
--- a/stratosphere-addons/avro/src/test/java/eu/stratosphere/api/avro/AvroExternalJarProgramITCase.java
+++ b/stratosphere-addons/avro/src/test/java/eu/stratosphere/api/avro/AvroExternalJarProgramITCase.java
@@ -47,6 +47,7 @@ public class AvroExternalJarProgramITCase {
try {
testMiniCluster = new NepheleMiniCluster();
testMiniCluster.setJobManagerRpcPort(TEST_JM_PORT);
+ testMiniCluster.setTaskManagerNumSlots(4);
testMiniCluster.start();
String jarFile = JAR_FILE;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-clients/src/main/java/eu/stratosphere/client/LocalExecutor.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/main/java/eu/stratosphere/client/LocalExecutor.java b/stratosphere-clients/src/main/java/eu/stratosphere/client/LocalExecutor.java
index 89f996a..b017220 100644
--- a/stratosphere-clients/src/main/java/eu/stratosphere/client/LocalExecutor.java
+++ b/stratosphere-clients/src/main/java/eu/stratosphere/client/LocalExecutor.java
@@ -42,6 +42,8 @@ public class LocalExecutor extends PlanExecutor {
private static boolean DEFAULT_OVERWRITE = false;
+ private static final int DEFAULT_TASK_MANAGER_NUM_SLOTS = 1;
+
private final Object lock = new Object(); // we lock to ensure singleton execution
private NepheleMiniCluster nephele;
@@ -54,6 +56,8 @@ public class LocalExecutor extends PlanExecutor {
private int taskManagerDataPort = -1;
+ private int taskManagerNumSlots = DEFAULT_TASK_MANAGER_NUM_SLOTS;
+
private String configDir;
private String hdfsConfigFile;
@@ -129,6 +133,10 @@ public class LocalExecutor extends PlanExecutor {
public void setDefaultAlwaysCreateDirectory(boolean defaultAlwaysCreateDirectory) {
this.defaultAlwaysCreateDirectory = defaultAlwaysCreateDirectory;
}
+
+ public void setTaskManagerNumSlots(int taskManagerNumSlots) { this.taskManagerNumSlots = taskManagerNumSlots; }
+
+ public int getTaskManagerNumSlots() { return this.taskManagerNumSlots; }
// --------------------------------------------------------------------------------------------
@@ -157,6 +165,7 @@ public class LocalExecutor extends PlanExecutor {
}
nephele.setDefaultOverwriteFiles(defaultOverwriteFiles);
nephele.setDefaultAlwaysCreateDirectory(defaultAlwaysCreateDirectory);
+ nephele.setTaskManagerNumSlots(taskManagerNumSlots);
// start it up
this.nephele.start();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-clients/src/main/java/eu/stratosphere/client/minicluster/NepheleMiniCluster.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/main/java/eu/stratosphere/client/minicluster/NepheleMiniCluster.java b/stratosphere-clients/src/main/java/eu/stratosphere/client/minicluster/NepheleMiniCluster.java
index 79e5c64..4daca26 100644
--- a/stratosphere-clients/src/main/java/eu/stratosphere/client/minicluster/NepheleMiniCluster.java
+++ b/stratosphere-clients/src/main/java/eu/stratosphere/client/minicluster/NepheleMiniCluster.java
@@ -46,6 +46,8 @@ public class NepheleMiniCluster {
private static final boolean DEFAULT_LAZY_MEMORY_ALLOCATION = true;
+ private static final int DEFAULT_TASK_MANAGER_NUM_SLOTS = -1;
+
// --------------------------------------------------------------------------------------------
private final Object startStopLock = new Object();
@@ -56,7 +58,9 @@ public class NepheleMiniCluster {
private int taskManagerDataPort = DEFAULT_TM_DATA_PORT;
- private int numTaskManager = DEFAULT_NUM_TASK_MANAGER;
+ private int numTaskTracker = DEFAULT_NUM_TASK_MANAGER;
+
+ private int taskManagerNumSlots = DEFAULT_TASK_MANAGER_NUM_SLOTS;
private long memorySize = DEFAULT_MEMORY_SIZE;
@@ -149,9 +153,13 @@ public class NepheleMiniCluster {
this.defaultAlwaysCreateDirectory = defaultAlwaysCreateDirectory;
}
- public void setNumTaskManager(int numTaskManager) { this.numTaskManager = numTaskManager; }
+ public void setNumTaskTracker(int numTaskTracker) { this.numTaskTracker = numTaskTracker; }
+
+ public int getNumTaskTracker() { return numTaskTracker; }
- public int getNumTaskManager() { return numTaskManager; }
+ public void setTaskManagerNumSlots(int taskManagerNumSlots) { this.taskManagerNumSlots = taskManagerNumSlots; }
+
+ public int getTaskManagerNumSlots() { return taskManagerNumSlots; }
// ------------------------------------------------------------------------
// Life cycle and Job Submission
@@ -172,7 +180,7 @@ public class NepheleMiniCluster {
} else {
Configuration conf = getMiniclusterDefaultConfig(jobManagerRpcPort, taskManagerRpcPort,
taskManagerDataPort, memorySize, hdfsConfigFile, lazyMemoryAllocation, defaultOverwriteFiles,
- defaultAlwaysCreateDirectory, numTaskManager);
+ defaultAlwaysCreateDirectory, taskManagerNumSlots, numTaskTracker);
GlobalConfiguration.includeConfiguration(conf);
}
@@ -196,7 +204,7 @@ public class NepheleMiniCluster {
// start the job manager
jobManager = new JobManager(ExecutionMode.LOCAL);
- waitForJobManagerToBecomeReady(numTaskManager);
+ waitForJobManagerToBecomeReady(numTaskTracker);
}
}
@@ -236,7 +244,8 @@ public class NepheleMiniCluster {
public static Configuration getMiniclusterDefaultConfig(int jobManagerRpcPort, int taskManagerRpcPort,
int taskManagerDataPort, long memorySize, String hdfsConfigFile, boolean lazyMemory,
- boolean defaultOverwriteFiles, boolean defaultAlwaysCreateDirectory, int numTaskManager)
+ boolean defaultOverwriteFiles, boolean defaultAlwaysCreateDirectory,
+ int taskManagerNumSlots, int numTaskManager)
{
final Configuration config = new Configuration();
@@ -284,6 +293,8 @@ public class NepheleMiniCluster {
config.setLong(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, memorySize/numTaskManager);
config.setInteger(ConfigConstants.LOCAL_INSTANCE_MANAGER_NUMBER_TASK_MANAGER, numTaskManager);
+
+ config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, taskManagerNumSlots);
return config;
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java b/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
index 00790f4..31138f6 100644
--- a/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
+++ b/stratosphere-clients/src/main/java/eu/stratosphere/client/program/Client.java
@@ -77,7 +77,7 @@ public class Client {
configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerAddress.getAddress().getHostAddress());
configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerAddress.getPort());
- this.compiler = new PactCompiler(new DataStatistics(), new DefaultCostEstimator(), jobManagerAddress);
+ this.compiler = new PactCompiler(new DataStatistics(), new DefaultCostEstimator());
// Disable Local Execution when using a Client
ContextEnvironment.disableLocalExecution();
@@ -104,8 +104,7 @@ public class Client {
throw new CompilerException("Cannot find port to job manager's RPC service in the global configuration.");
}
- final InetSocketAddress jobManagerAddress = new InetSocketAddress(address, port);
- this.compiler = new PactCompiler(new DataStatistics(), new DefaultCostEstimator(), jobManagerAddress);
+ this.compiler = new PactCompiler(new DataStatistics(), new DefaultCostEstimator());
// Disable Local Execution when using a Client
ContextEnvironment.disableLocalExecution();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-clients/src/test/java/eu/stratosphere/client/CliFrontendListCancelTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/test/java/eu/stratosphere/client/CliFrontendListCancelTest.java b/stratosphere-clients/src/test/java/eu/stratosphere/client/CliFrontendListCancelTest.java
index 7ccd420..ba02fa9 100644
--- a/stratosphere-clients/src/test/java/eu/stratosphere/client/CliFrontendListCancelTest.java
+++ b/stratosphere-clients/src/test/java/eu/stratosphere/client/CliFrontendListCancelTest.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.junit.Assert;
@@ -34,8 +33,6 @@ import eu.stratosphere.nephele.client.JobProgressResult;
import eu.stratosphere.nephele.client.JobSubmissionResult;
import eu.stratosphere.nephele.event.job.AbstractEvent;
import eu.stratosphere.nephele.event.job.RecentJobEvent;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.managementgraph.ManagementGraph;
@@ -202,18 +199,18 @@ public class CliFrontendListCancelTest {
}
@Override
- public Map<InstanceType, InstanceTypeDescription> getMapOfAvailableInstanceTypes() throws IOException {
+ public void logBufferUtilization(JobID jobID) throws IOException {
throw new UnsupportedOperationException();
}
@Override
- public void logBufferUtilization(JobID jobID) throws IOException {
+ public NetworkTopology getNetworkTopology(JobID jobID) throws IOException {
throw new UnsupportedOperationException();
}
@Override
- public NetworkTopology getNetworkTopology(JobID jobID) throws IOException {
- throw new UnsupportedOperationException();
+ public int getAvailableSlots() {
+ return 1;
}
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-clients/src/test/java/eu/stratosphere/client/testjar/WordCount.java
----------------------------------------------------------------------
diff --git a/stratosphere-clients/src/test/java/eu/stratosphere/client/testjar/WordCount.java b/stratosphere-clients/src/test/java/eu/stratosphere/client/testjar/WordCount.java
index e827805..5218dc2 100644
--- a/stratosphere-clients/src/test/java/eu/stratosphere/client/testjar/WordCount.java
+++ b/stratosphere-clients/src/test/java/eu/stratosphere/client/testjar/WordCount.java
@@ -70,9 +70,10 @@ public class WordCount {
* FlatMapFunction. The function takes a line (String) and splits it into
* multiple pairs in the form of "(word,1)" (Tuple2<String, Integer>).
*/
- @SuppressWarnings("serial")
public static final class Tokenizer extends FlatMapFunction<String, Tuple2<String, Integer>> {
+ private static final long serialVersionUID = 1L;
+
@Override
public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
// normalize and split the line
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java
index 2076902..bf3d6af 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java
@@ -13,8 +13,6 @@
package eu.stratosphere.compiler;
-import java.io.IOException;
-import java.net.InetSocketAddress;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
@@ -90,11 +88,6 @@ import eu.stratosphere.compiler.postpass.OptimizerPostPass;
import eu.stratosphere.configuration.ConfigConstants;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.configuration.GlobalConfiguration;
-import eu.stratosphere.nephele.instance.InstanceType;
-import eu.stratosphere.nephele.instance.InstanceTypeDescription;
-import eu.stratosphere.nephele.ipc.RPC;
-import eu.stratosphere.nephele.net.NetUtils;
-import eu.stratosphere.nephele.protocols.ExtendedManagementProtocol;
import eu.stratosphere.pact.runtime.shipping.ShipStrategyType;
import eu.stratosphere.pact.runtime.task.util.LocalStrategy;
import eu.stratosphere.util.InstantiationUtil;
@@ -340,24 +333,10 @@ public class PactCompiler {
private final CostEstimator costEstimator;
/**
- * The connection used to connect to the job-manager.
- */
- private final InetSocketAddress jobManagerAddress;
-
- /**
- * The maximum number of machines (instances) to use, per the configuration.
- */
- private int maxMachines;
-
- /**
* The default degree of parallelism for jobs compiled by this compiler.
*/
private int defaultDegreeOfParallelism;
- /**
- * The maximum number of subtasks that should share an instance.
- */
- private int maxIntraNodeParallelism;
// ------------------------------------------------------------------------
// Constructor & Setup
@@ -420,106 +399,29 @@ public class PactCompiler {
* The <tt>CostEstimator</tt> to use to cost the individual operations.
*/
public PactCompiler(DataStatistics stats, CostEstimator estimator) {
- this(stats, estimator, null);
- }
-
- /**
- * Creates a new compiler instance that uses the statistics object to determine properties about the input.
- * Given those statistics, the compiler can make better choices for the execution strategies.
- * as if no filesystem was given. It uses the given cost estimator to compute the costs of the individual
- * operations.
- * <p>
- * The given socket-address is used to connect to the job manager to obtain system characteristics, like available
- * memory. If that parameter is null, then the address is obtained from the global configuration.
- *
- * @param stats
- * The statistics to be used to determine the input properties.
- * @param estimator
- * The <tt>CostEstimator</tt> to use to cost the individual operations.
- * @param jobManagerConnection
- * The address of the job manager that is queried for system characteristics.
- */
- public PactCompiler(DataStatistics stats, CostEstimator estimator, InetSocketAddress jobManagerConnection) {
this.statistics = stats;
this.costEstimator = estimator;
Configuration config = GlobalConfiguration.getConfiguration();
- // determine the maximum number of instances to use
- this.maxMachines = -1;
-
// determine the default parallelization degree
this.defaultDegreeOfParallelism = config.getInteger(ConfigConstants.DEFAULT_PARALLELIZATION_DEGREE_KEY,
ConfigConstants.DEFAULT_PARALLELIZATION_DEGREE);
-
- // determine the default intra-node parallelism
- int maxInNodePar = config.getInteger(ConfigConstants.PARALLELIZATION_MAX_INTRA_NODE_DEGREE_KEY,
- ConfigConstants.DEFAULT_MAX_INTRA_NODE_PARALLELIZATION_DEGREE);
- if (maxInNodePar == 0 || maxInNodePar < -1) {
- LOG.error("Invalid maximum degree of intra-node parallelism: " + maxInNodePar +
- ". Ignoring parameter.");
- maxInNodePar = ConfigConstants.DEFAULT_MAX_INTRA_NODE_PARALLELIZATION_DEGREE;
- }
- this.maxIntraNodeParallelism = maxInNodePar;
-
- // assign the connection to the job-manager
- if (jobManagerConnection != null) {
- this.jobManagerAddress = jobManagerConnection;
- } else {
- final String address = config.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, null);
- if (address == null) {
- throw new CompilerException(
- "Cannot find address to job manager's RPC service in the global configuration.");
- }
-
- final int port = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY,
- ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT);
- if (port < 0) {
- throw new CompilerException(
- "Cannot find port to job manager's RPC service in the global configuration.");
- }
-
- this.jobManagerAddress = new InetSocketAddress(address, port);
- }
}
// ------------------------------------------------------------------------
// Getters / Setters
// ------------------------------------------------------------------------
- public int getMaxMachines() {
- return maxMachines;
- }
-
- public void setMaxMachines(int maxMachines) {
- if (maxMachines == -1 || maxMachines > 0) {
- this.maxMachines = maxMachines;
- } else {
- throw new IllegalArgumentException();
- }
- }
-
public int getDefaultDegreeOfParallelism() {
return defaultDegreeOfParallelism;
}
public void setDefaultDegreeOfParallelism(int defaultDegreeOfParallelism) {
- if (defaultDegreeOfParallelism == -1 || defaultDegreeOfParallelism > 0) {
+ if (defaultDegreeOfParallelism > 0) {
this.defaultDegreeOfParallelism = defaultDegreeOfParallelism;
} else {
- throw new IllegalArgumentException();
- }
- }
-
- public int getMaxIntraNodeParallelism() {
- return maxIntraNodeParallelism;
- }
-
- public void setMaxIntraNodeParallelism(int maxIntraNodeParallelism) {
- if (maxIntraNodeParallelism == -1 || maxIntraNodeParallelism > 0) {
- this.maxIntraNodeParallelism = maxIntraNodeParallelism;
- } else {
- throw new IllegalArgumentException();
+ throw new IllegalArgumentException("Default parallelism cannot be zero or negative.");
}
}
@@ -550,14 +452,9 @@ public class PactCompiler {
// -------------------- try to get the connection to the job manager ----------------------
// --------------------------to obtain instance information --------------------------------
final OptimizerPostPass postPasser = getPostPassFromPlan(program);
- return compile(program, getInstanceTypeInfo(), postPasser);
- }
-
- public OptimizedPlan compile(Plan program, InstanceTypeDescription type) throws CompilerException {
- final OptimizerPostPass postPasser = getPostPassFromPlan(program);
- return compile(program, type, postPasser);
+ return compile(program, postPasser);
}
-
+
/**
* Translates the given pact plan in to an OptimizedPlan, where all nodes have their local strategy assigned
* and all channels have a shipping strategy assigned. The process goes through several phases:
@@ -569,8 +466,6 @@ public class PactCompiler {
* </ol>
*
* @param program The program to be translated.
- * @param type The instance type to schedule the execution on. Used also to determine the amount of memory
- * available to the tasks.
* @param postPasser The function to be used for post passing the optimizer's plan and setting the
* data type specific serialization routines.
* @return The optimized plan.
@@ -579,8 +474,8 @@ public class PactCompiler {
* Thrown, if the plan is invalid or the optimizer encountered an inconsistent
* situation during the compilation process.
*/
- private OptimizedPlan compile(Plan program, InstanceTypeDescription type, OptimizerPostPass postPasser) throws CompilerException {
- if (program == null || type == null || postPasser == null) {
+ private OptimizedPlan compile(Plan program, OptimizerPostPass postPasser) throws CompilerException {
+ if (program == null || postPasser == null) {
throw new NullPointerException();
}
@@ -588,73 +483,14 @@ public class PactCompiler {
if (LOG.isDebugEnabled()) {
LOG.debug("Beginning compilation of program '" + program.getJobName() + '\'');
}
-
- final String instanceName = type.getInstanceType().getIdentifier();
-
- // we subtract some percentage of the memory to accommodate for rounding errors
- final long memoryPerInstance = (long) (type.getHardwareDescription().getSizeOfFreeMemory() * 0.96f);
- final int numInstances = type.getMaximumNumberOfAvailableInstances();
-
- // determine the maximum number of machines to use
- int maxMachinesJob = program.getMaxNumberMachines();
-
- if (maxMachinesJob < 1) {
- maxMachinesJob = this.maxMachines;
- } else if (this.maxMachines >= 1) {
- // check if the program requested more than the global config allowed
- if (maxMachinesJob > this.maxMachines && LOG.isWarnEnabled()) {
- LOG.warn("Maximal number of machines specified in program (" + maxMachinesJob
- + ") exceeds the maximum number in the global configuration (" + this.maxMachines
- + "). Using the global configuration value.");
- }
-
- maxMachinesJob = Math.min(maxMachinesJob, this.maxMachines);
- }
-
- // adjust the maximum number of machines the the number of available instances
- if (maxMachinesJob < 1) {
- maxMachinesJob = numInstances;
- } else if (maxMachinesJob > numInstances) {
- maxMachinesJob = numInstances;
- if (LOG.isInfoEnabled()) {
- LOG.info("Maximal number of machines decreased to " + maxMachinesJob +
- " because no more instances are available.");
- }
- }
// set the default degree of parallelism
int defaultParallelism = program.getDefaultParallelism() > 0 ?
program.getDefaultParallelism() : this.defaultDegreeOfParallelism;
-
- if (this.maxIntraNodeParallelism > 0) {
- if (defaultParallelism < 1) {
- defaultParallelism = maxMachinesJob * this.maxIntraNodeParallelism;
- }
- else if (defaultParallelism > maxMachinesJob * this.maxIntraNodeParallelism) {
- int oldParallelism = defaultParallelism;
- defaultParallelism = maxMachinesJob * this.maxIntraNodeParallelism;
-
- if (LOG.isInfoEnabled()) {
- LOG.info("Decreasing default degree of parallelism from " + oldParallelism +
- " to " + defaultParallelism + " to fit a maximum number of " + maxMachinesJob +
- " instances with a intra-parallelism of " + this.maxIntraNodeParallelism);
- }
- }
- } else if (defaultParallelism < 1) {
- defaultParallelism = maxMachinesJob;
- if (LOG.isInfoEnabled()) {
- LOG.info("No default parallelism specified. Using default parallelism of " + defaultParallelism + " (One task per instance)");
- }
- }
// log the output
if (LOG.isDebugEnabled()) {
- LOG.debug("Using a default degree of parallelism of " + defaultParallelism +
- ", a maximum intra-node parallelism of " + this.maxIntraNodeParallelism + '.');
- if (this.maxMachines > 0) {
- LOG.debug("The execution is limited to a maximum number of " + maxMachinesJob + " machines.");
- }
-
+ LOG.debug("Using a default degree of parallelism of " + defaultParallelism + '.');
}
// the first step in the compilation is to create the optimizer plan representation
@@ -666,7 +502,7 @@ public class PactCompiler {
// 4) It makes estimates about the data volume of the data sources and
// propagates those estimates through the plan
- GraphCreatingVisitor graphCreator = new GraphCreatingVisitor(maxMachinesJob, defaultParallelism);
+ GraphCreatingVisitor graphCreator = new GraphCreatingVisitor(defaultParallelism);
program.accept(graphCreator);
// if we have a plan with multiple data sinks, add logical optimizer nodes that have two data-sinks as children
@@ -689,8 +525,7 @@ public class PactCompiler {
// now that we have all nodes created and recorded which ones consume memory, tell the nodes their minimal
// guaranteed memory, for further cost estimations. we assume an equal distribution of memory among consumer tasks
- rootNode.accept(new IdAndMemoryAndEstimatesVisitor(this.statistics,
- graphCreator.getMemoryConsumerCount() == 0 ? 0 : memoryPerInstance / graphCreator.getMemoryConsumerCount()));
+ rootNode.accept(new IdAndEstimatesVisitor(this.statistics));
// Now that the previous step is done, the next step is to traverse the graph again for the two
// steps that cannot directly be performed during the plan enumeration, because we are dealing with DAGs
@@ -733,9 +568,8 @@ public class PactCompiler {
dp.resolveDeadlocks(bestPlanSinks);
// finalize the plan
- OptimizedPlan plan = new PlanFinalizer().createFinalPlan(bestPlanSinks, program.getJobName(), program, memoryPerInstance);
- plan.setInstanceTypeName(instanceName);
-
+ OptimizedPlan plan = new PlanFinalizer().createFinalPlan(bestPlanSinks, program.getJobName(), program);
+
// swap the binary unions for n-ary unions. this changes no strategies or memory consumers whatsoever, so
// we can do this after the plan finalization
plan.accept(new BinaryUnionReplacer());
@@ -755,7 +589,7 @@ public class PactCompiler {
* from the plan can be traversed.
*/
public static List<DataSinkNode> createPreOptimizedPlan(Plan program) {
- GraphCreatingVisitor graphCreator = new GraphCreatingVisitor(-1, 1);
+ GraphCreatingVisitor graphCreator = new GraphCreatingVisitor(1);
program.accept(graphCreator);
return graphCreator.sinks;
}
@@ -783,22 +617,18 @@ public class PactCompiler {
private final List<DataSinkNode> sinks; // all data sink nodes in the optimizer plan
- private final int maxMachines; // the maximum number of machines to use
-
private final int defaultParallelism; // the default degree of parallelism
- private int numMemoryConsumers;
-
private final GraphCreatingVisitor parent; // reference to enclosing creator, in case of a recursive translation
private final boolean forceDOP;
- private GraphCreatingVisitor(int maxMachines, int defaultParallelism) {
- this(null, false, maxMachines, defaultParallelism, null);
+ private GraphCreatingVisitor(int defaultParallelism) {
+ this(null, false, defaultParallelism, null);
}
- private GraphCreatingVisitor(GraphCreatingVisitor parent, boolean forceDOP, int maxMachines,
+ private GraphCreatingVisitor(GraphCreatingVisitor parent, boolean forceDOP,
int defaultParallelism, HashMap<Operator<?>, OptimizerNode> closure) {
if (closure == null){
con2node = new HashMap<Operator<?>, OptimizerNode>();
@@ -807,7 +637,6 @@ public class PactCompiler {
}
this.sources = new ArrayList<DataSourceNode>(4);
this.sinks = new ArrayList<DataSinkNode>(2);
- this.maxMachines = maxMachines;
this.defaultParallelism = defaultParallelism;
this.parent = parent;
this.forceDOP = forceDOP;
@@ -878,7 +707,6 @@ public class PactCompiler {
// catch this for the recursive translation of step functions
BulkPartialSolutionNode p = new BulkPartialSolutionNode(holder, containingIterationNode);
p.setDegreeOfParallelism(containingIterationNode.getDegreeOfParallelism());
- p.setSubtasksPerInstance(containingIterationNode.getSubtasksPerInstance());
n = p;
}
else if (c instanceof WorksetPlaceHolder) {
@@ -890,7 +718,6 @@ public class PactCompiler {
// catch this for the recursive translation of step functions
WorksetNode p = new WorksetNode(holder, containingIterationNode);
p.setDegreeOfParallelism(containingIterationNode.getDegreeOfParallelism());
- p.setSubtasksPerInstance(containingIterationNode.getSubtasksPerInstance());
n = p;
}
else if (c instanceof SolutionSetPlaceHolder) {
@@ -902,18 +729,14 @@ public class PactCompiler {
// catch this for the recursive translation of step functions
SolutionSetNode p = new SolutionSetNode(holder, containingIterationNode);
p.setDegreeOfParallelism(containingIterationNode.getDegreeOfParallelism());
- p.setSubtasksPerInstance(containingIterationNode.getSubtasksPerInstance());
n = p;
}
else {
- throw new IllegalArgumentException("Unknown operator type: " + c.getClass() + " " + c);
+ throw new IllegalArgumentException("Unknown operator type: " + c);
}
this.con2node.put(c, n);
- // record the potential memory consumption
- this.numMemoryConsumers += n.isMemoryConsumer() ? 1 : 0;
-
// set the parallelism only if it has not been set before. some nodes have a fixed DOP, such as the
// key-less reducer (all-reduce)
if (n.getDegreeOfParallelism() < 1) {
@@ -931,19 +754,6 @@ public class PactCompiler {
n.setDegreeOfParallelism(par);
}
- // check if we need to set the instance sharing accordingly such that
- // the maximum number of machines is not exceeded
- if (n.getSubtasksPerInstance() < 1) {
- int tasksPerInstance = 1;
- if (this.maxMachines > 0) {
- int p = n.getDegreeOfParallelism();
- tasksPerInstance = (p / this.maxMachines) + (p % this.maxMachines == 0 ? 0 : 1);
- }
-
- // we group together n tasks per machine, depending on config and the above computed
- // value required to obey the maximum number of machines
- n.setSubtasksPerInstance(tasksPerInstance);
- }
return true;
}
@@ -966,7 +776,7 @@ public class PactCompiler {
// first, recursively build the data flow for the step function
final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true,
- this.maxMachines, iterNode.getDegreeOfParallelism(), closure);
+ iterNode.getDegreeOfParallelism(), closure);
BulkPartialSolutionNode partialSolution = null;
@@ -994,9 +804,6 @@ public class PactCompiler {
iterNode.setNextPartialSolution(rootOfStepFunction, terminationCriterion);
iterNode.setPartialSolution(partialSolution);
- // account for the nested memory consumers
- this.numMemoryConsumers += recursiveCreator.numMemoryConsumers;
-
// go over the contained data flow and mark the dynamic path nodes
StaticDynamicPathIdentifier identifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
rootOfStepFunction.accept(identifier);
@@ -1013,7 +820,7 @@ public class PactCompiler {
// first, recursively build the data flow for the step function
final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true,
- this.maxMachines, iterNode.getDegreeOfParallelism(), closure);
+ iterNode.getDegreeOfParallelism(), closure);
// descend from the solution set delta. check that it depends on both the workset
// and the solution set. If it does depend on both, this descend should create both nodes
iter.getSolutionSetDelta().accept(recursiveCreator);
@@ -1067,19 +874,12 @@ public class PactCompiler {
iterNode.setPartialSolution(solutionSetNode, worksetNode);
iterNode.setNextPartialSolution(solutionSetDeltaNode, nextWorksetNode);
- // account for the nested memory consumers
- this.numMemoryConsumers += recursiveCreator.numMemoryConsumers;
-
// go over the contained data flow and mark the dynamic path nodes
StaticDynamicPathIdentifier pathIdentifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
nextWorksetNode.accept(pathIdentifier);
iterNode.getSolutionSetDelta().accept(pathIdentifier);
}
}
-
- int getMemoryConsumerCount() {
- return this.numMemoryConsumers;
- }
};
private static final class StaticDynamicPathIdentifier implements Visitor<OptimizerNode> {
@@ -1107,17 +907,14 @@ public class PactCompiler {
* Simple visitor that sets the minimal guaranteed memory per task based on the amount of available memory,
* the number of memory consumers, and on the task's degree of parallelism.
*/
- private static final class IdAndMemoryAndEstimatesVisitor implements Visitor<OptimizerNode> {
+ private static final class IdAndEstimatesVisitor implements Visitor<OptimizerNode> {
private final DataStatistics statistics;
-
- private final long memoryPerTaskPerInstance;
-
+
private int id = 1;
- private IdAndMemoryAndEstimatesVisitor(DataStatistics statistics, long memoryPerTaskPerInstance) {
+ private IdAndEstimatesVisitor(DataStatistics statistics) {
this.statistics = statistics;
- this.memoryPerTaskPerInstance = memoryPerTaskPerInstance;
}
@@ -1128,11 +925,6 @@ public class PactCompiler {
return false;
}
- // assign minimum memory share, for lower bound estimates
- final long mem = visitable.isMemoryConsumer() ?
- this.memoryPerTaskPerInstance / visitable.getSubtasksPerInstance() : 0;
- visitable.setMinimalMemoryPerSubTask(mem);
-
return true;
}
@@ -1234,8 +1026,6 @@ public class PactCompiler {
private final Deque<IterationPlanNode> stackOfIterationNodes;
- private long memoryPerInstance; // the amount of memory per instance
-
private int memoryConsumerWeights; // a counter of all memory consumers
/**
@@ -1248,12 +1038,7 @@ public class PactCompiler {
this.stackOfIterationNodes = new ArrayDeque<IterationPlanNode>();
}
- private OptimizedPlan createFinalPlan(List<SinkPlanNode> sinks, String jobName, Plan originalPlan, long memPerInstance) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Available memory per instance: " + memPerInstance);
- }
-
- this.memoryPerInstance = memPerInstance;
+ private OptimizedPlan createFinalPlan(List<SinkPlanNode> sinks, String jobName, Plan originalPlan) {
this.memoryConsumerWeights = 0;
// traverse the graph
@@ -1263,44 +1048,36 @@ public class PactCompiler {
// assign the memory to each node
if (this.memoryConsumerWeights > 0) {
- final long memoryPerInstanceAndWeight = this.memoryPerInstance / this.memoryConsumerWeights;
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Memory per consumer weight: " + memoryPerInstanceAndWeight);
- }
-
for (PlanNode node : this.allNodes) {
// assign memory to the driver strategy of the node
final int consumerWeight = node.getMemoryConsumerWeight();
if (consumerWeight > 0) {
- final long mem = memoryPerInstanceAndWeight * consumerWeight / node.getSubtasksPerInstance();
- node.setMemoryPerSubTask(mem);
+ final double relativeMem = (double)consumerWeight / this.memoryConsumerWeights;
+ node.setRelativeMemoryPerSubtask(relativeMem);
if (LOG.isDebugEnabled()) {
- final long mib = mem >> 20;
- LOG.debug("Assigned " + mib + " MiBytes memory to each subtask of " +
- node.getPactContract().getName() + " (" + mib * node.getDegreeOfParallelism() +
- " MiBytes total.)");
+ LOG.debug("Assigned " + relativeMem + " of total memory to each subtask of " +
+ node.getPactContract().getName() + ".");
}
}
// assign memory to the local and global strategies of the channels
for (Channel c : node.getInputs()) {
if (c.getLocalStrategy().dams()) {
- final long mem = memoryPerInstanceAndWeight / node.getSubtasksPerInstance();
- c.setMemoryLocalStrategy(mem);
+ final double relativeMem = 1.0 / this.memoryConsumerWeights;
+ c.setRelativeMemoryLocalStrategy(relativeMem);
if (LOG.isDebugEnabled()) {
- final long mib = mem >> 20;
- LOG.debug("Assigned " + mib + " MiBytes memory to each local strategy instance of " +
- c + " (" + mib * node.getDegreeOfParallelism() + " MiBytes total.)");
+ LOG.debug("Assigned " + relativeMem + " of total memory to each local strategy " +
+ "instance of " + c + ".");
}
}
if (c.getTempMode() != TempMode.NONE) {
- final long mem = memoryPerInstanceAndWeight / node.getSubtasksPerInstance();
- c.setTempMemory(mem);
+ final double relativeMem = 1.0/ this.memoryConsumerWeights;
+ c.setRelativeTempMemory(relativeMem);
if (LOG.isDebugEnabled()) {
- final long mib = mem >> 20;
- LOG.debug("Assigned " + mib + " MiBytes memory to each instance of the temp table for " +
- c + " (" + mib * node.getDegreeOfParallelism() + " MiBytes total.)");
+ LOG.debug("Assigned " + relativeMem + " of total memory to each instance of the temp " +
+ "table" +
+ " " +
+ "for " + c + ".");
}
}
}
@@ -1525,182 +1302,4 @@ public class PactCompiler {
throw new CompilerException("Class '" + className + "' is not an optimizer post passer.", ccex);
}
}
-
- private InstanceTypeDescription getInstanceTypeInfo() {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Connecting compiler to JobManager to dertermine instance information.");
- }
-
- // create the connection in a separate thread, such that this thread
- // can abort, if an unsuccessful connection occurs.
- Map<InstanceType, InstanceTypeDescription> instances = null;
-
- JobManagerConnector jmc = new JobManagerConnector(this.jobManagerAddress);
- Thread connectorThread = new Thread(jmc, "Compiler - JobManager connector.");
- connectorThread.setDaemon(true);
- connectorThread.start();
-
- // connect and get the result
- try {
- jmc.waitForCompletion();
- instances = jmc.instances;
- if (instances == null) {
- throw new NullPointerException("Returned instance map is <null>");
- }
- }
- catch (IOException e) {
- throw new CompilerException(e.getMessage());
- }
- catch (Throwable t) {
- throw new CompilerException("Cannot connect to the JobManager to determine the available TaskManagers. "
- + "Check if the JobManager is running (using the web interface or log files). Reason: " +
- t.getMessage(), t);
- }
-
- // determine which type to run on
- return getType(instances);
- }
-
- /**
- * This utility method picks the instance type to be used for executing programs.
- * <p>
- *
- * @param types The available types.
- * @return The type to be used for scheduling.
- *
- * @throws CompilerException
- * @throws IllegalArgumentException
- */
- private InstanceTypeDescription getType(Map<InstanceType, InstanceTypeDescription> types)
- throws CompilerException
- {
- if (types == null || types.size() < 1) {
- throw new IllegalArgumentException("No instance type found.");
- }
-
- InstanceTypeDescription retValue = null;
- long totalMemory = 0;
- int numInstances = 0;
-
- final Iterator<InstanceTypeDescription> it = types.values().iterator();
- while(it.hasNext())
- {
- final InstanceTypeDescription descr = it.next();
-
- // skip instances for which no hardware description is available
- // this means typically that no
- if (descr.getHardwareDescription() == null || descr.getInstanceType() == null) {
- continue;
- }
-
- final int curInstances = descr.getMaximumNumberOfAvailableInstances();
- final long curMemory = curInstances * descr.getHardwareDescription().getSizeOfFreeMemory();
-
- // get, if first, or if it has more instances and not less memory, or if it has significantly more memory
- // and the same number of cores still
- if ( (retValue == null) ||
- (curInstances > numInstances && (int) (curMemory * 1.2f) > totalMemory) ||
- (curInstances * retValue.getInstanceType().getNumberOfCores() >= numInstances &&
- (int) (curMemory * 1.5f) > totalMemory)
- )
- {
- retValue = descr;
- numInstances = curInstances;
- totalMemory = curMemory;
- }
- }
-
- if (retValue == null) {
- throw new CompilerException("No instance currently registered at the job-manager. Retry later.\n" +
- "If the system has recently started, it may take a few seconds until the instances register.");
- }
-
- return retValue;
- }
-
- /**
- * Utility class for an asynchronous connection to the job manager to determine the available instances.
- */
- private static final class JobManagerConnector implements Runnable {
-
- private static final long MAX_MILLIS_TO_WAIT = 10000;
-
- private final InetSocketAddress jobManagerAddress;
-
- private final Object lock = new Object();
-
- private volatile Map<InstanceType, InstanceTypeDescription> instances;
-
- private volatile Throwable error;
-
-
- private JobManagerConnector(InetSocketAddress jobManagerAddress) {
- this.jobManagerAddress = jobManagerAddress;
- }
-
-
- public Map<InstanceType, InstanceTypeDescription> waitForCompletion() throws Throwable {
- long start = System.currentTimeMillis();
- long remaining = MAX_MILLIS_TO_WAIT;
-
- if (this.error != null) {
- throw this.error;
- }
- if (this.instances != null) {
- return this.instances;
- }
-
- do {
- try {
- synchronized (this.lock) {
- this.lock.wait(remaining);
- }
- } catch (InterruptedException iex) {}
- }
- while (this.error == null && this.instances == null &&
- (remaining = MAX_MILLIS_TO_WAIT + start - System.currentTimeMillis()) > 0);
-
- if (this.error != null) {
- throw this.error;
- }
- if (this.instances != null) {
- return this.instances;
- }
-
- throw new IOException("Could not connect to the JobManager at " + jobManagerAddress +
- ". Please make sure that the Job Manager is started properly.");
- }
-
-
- @Override
- public void run() {
- ExtendedManagementProtocol jobManagerConnection = null;
-
- try {
- jobManagerConnection = RPC.getProxy(ExtendedManagementProtocol.class,
- this.jobManagerAddress, NetUtils.getSocketFactory());
-
- this.instances = jobManagerConnection.getMapOfAvailableInstanceTypes();
- if (this.instances == null) {
- throw new IOException("Returned instance map was <null>");
- }
- } catch (Throwable t) {
- this.error = t;
- } finally {
- // first of all, signal completion
- synchronized (this.lock) {
- this.lock.notifyAll();
- }
-
- if (jobManagerConnection != null) {
- try {
- RPC.stopProxy(jobManagerConnection);
- } catch (Throwable t) {
- LOG.error("Could not cleanly shut down connection from compiler to job manager,", t);
- }
- }
- jobManagerConnection = null;
- }
- }
- }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
index 058af1a..fde5970 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/costs/DefaultCostEstimator.java
@@ -95,14 +95,20 @@ public class DefaultCostEstimator extends CostEstimator {
@Override
public void addBroadcastCost(EstimateProvider estimates, int replicationFactor, Costs costs) {
- // assumption: we need ship the whole data over the network to each node.
- final long estOutShipSize = estimates.getEstimatedOutputSize();
- if (estOutShipSize <= 0) {
- costs.setNetworkCost(Costs.UNKNOWN);
+ // if our replication factor is negative, we cannot calculate broadcast costs
+
+ if (replicationFactor > 0) {
+ // assumption: we need ship the whole data over the network to each node.
+ final long estOutShipSize = estimates.getEstimatedOutputSize();
+ if (estOutShipSize <= 0) {
+ costs.setNetworkCost(Costs.UNKNOWN);
+ } else {
+ costs.addNetworkCost(replicationFactor * estOutShipSize);
+ }
+ costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE * replicationFactor);
} else {
- costs.addNetworkCost(replicationFactor * estOutShipSize);
+ costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE * 200);
}
- costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE * replicationFactor * 100);
}
// --------------------------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/AbstractPartialSolutionNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/AbstractPartialSolutionNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/AbstractPartialSolutionNode.java
index 8fd6f79..2f7cb2b 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/AbstractPartialSolutionNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/AbstractPartialSolutionNode.java
@@ -42,11 +42,6 @@ public abstract class AbstractPartialSolutionNode extends OptimizerNode {
public abstract IterationNode getIterationNode();
// --------------------------------------------------------------------------------------------
-
- @Override
- public boolean isMemoryConsumer() {
- return false;
- }
public boolean isOnDynamicPath() {
return true;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BinaryUnionNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BinaryUnionNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BinaryUnionNode.java
index 70752b5..50ec01b 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BinaryUnionNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BinaryUnionNode.java
@@ -122,20 +122,12 @@ public class BinaryUnionNode extends TwoInputNode {
final RequestedLocalProperties noLocalProps = new RequestedLocalProperties();
final int dop = getDegreeOfParallelism();
- final int subPerInstance = getSubtasksPerInstance();
- final int numInstances = dop / subPerInstance + (dop % subPerInstance == 0 ? 0 : 1);
final int inDop1 = getFirstPredecessorNode().getDegreeOfParallelism();
- final int inSubPerInstance1 = getFirstPredecessorNode().getSubtasksPerInstance();
- final int inNumInstances1 = inDop1 / inSubPerInstance1 + (inDop1 % inSubPerInstance1 == 0 ? 0 : 1);
final int inDop2 = getSecondPredecessorNode().getDegreeOfParallelism();
- final int inSubPerInstance2 = getSecondPredecessorNode().getSubtasksPerInstance();
- final int inNumInstances2 = inDop2 / inSubPerInstance2 + (inDop2 % inSubPerInstance2 == 0 ? 0 : 1);
-
- final boolean globalDopChange1 = numInstances != inNumInstances1;
- final boolean globalDopChange2 = numInstances != inNumInstances2;
- final boolean localDopChange1 = numInstances == inNumInstances1 & subPerInstance != inSubPerInstance1;
- final boolean localDopChange2 = numInstances == inNumInstances2 & subPerInstance != inSubPerInstance2;
-
+
+ final boolean dopChange1 = dop != inDop1;
+ final boolean dopChange2 = dop != inDop2;
+
// enumerate all pairwise combination of the children's plans together with
// all possible operator strategy combination
@@ -154,15 +146,11 @@ public class BinaryUnionNode extends TwoInputNode {
Channel c1 = new Channel(child1, this.input1.getMaterializationMode());
if (this.input1.getShipStrategy() == null) {
// free to choose the ship strategy
- igps.parameterizeChannel(c1, globalDopChange1, localDopChange1);
+ igps.parameterizeChannel(c1, dopChange1);
// if the DOP changed, make sure that we cancel out properties, unless the
// ship strategy preserves/establishes them even under changing DOPs
- if (globalDopChange1 && !c1.getShipStrategy().isNetworkStrategy()) {
- c1.getGlobalProperties().reset();
- }
- if (localDopChange1 && !(c1.getShipStrategy().isNetworkStrategy() ||
- c1.getShipStrategy().compensatesForLocalDOPChanges())) {
+ if (dopChange1 && !c1.getShipStrategy().isNetworkStrategy()) {
c1.getGlobalProperties().reset();
}
} else {
@@ -173,10 +161,8 @@ public class BinaryUnionNode extends TwoInputNode {
c1.setShipStrategy(this.input1.getShipStrategy());
}
- if (globalDopChange1) {
+ if (dopChange1) {
c1.adjustGlobalPropertiesForFullParallelismChange();
- } else if (localDopChange1) {
- c1.adjustGlobalPropertiesForLocalParallelismChange();
}
}
@@ -184,15 +170,11 @@ public class BinaryUnionNode extends TwoInputNode {
Channel c2 = new Channel(child2, this.input2.getMaterializationMode());
if (this.input2.getShipStrategy() == null) {
// free to choose the ship strategy
- igps.parameterizeChannel(c2, globalDopChange2, localDopChange2);
+ igps.parameterizeChannel(c2, dopChange2);
// if the DOP changed, make sure that we cancel out properties, unless the
// ship strategy preserves/establishes them even under changing DOPs
- if (globalDopChange2 && !c2.getShipStrategy().isNetworkStrategy()) {
- c2.getGlobalProperties().reset();
- }
- if (localDopChange2 && !(c2.getShipStrategy().isNetworkStrategy() ||
- c2.getShipStrategy().compensatesForLocalDOPChanges())) {
+ if (dopChange2 && !c2.getShipStrategy().isNetworkStrategy()) {
c2.getGlobalProperties().reset();
}
} else {
@@ -203,10 +185,8 @@ public class BinaryUnionNode extends TwoInputNode {
c2.setShipStrategy(this.input2.getShipStrategy());
}
- if (globalDopChange2) {
+ if (dopChange2) {
c2.adjustGlobalPropertiesForFullParallelismChange();
- } else if (localDopChange2) {
- c2.adjustGlobalPropertiesForLocalParallelismChange();
}
}
@@ -224,20 +204,20 @@ public class BinaryUnionNode extends TwoInputNode {
if (c1.getShipStrategy() == ShipStrategyType.FORWARD && c2.getShipStrategy() != ShipStrategyType.FORWARD) {
// adjust c2 to c1
c2 = c2.clone();
- p1.parameterizeChannel(c2,globalDopChange2);
+ p1.parameterizeChannel(c2,dopChange2);
} else if (c2.getShipStrategy() == ShipStrategyType.FORWARD && c1.getShipStrategy() != ShipStrategyType.FORWARD) {
// adjust c1 to c2
c1 = c1.clone();
- p2.parameterizeChannel(c1,globalDopChange1);
+ p2.parameterizeChannel(c1,dopChange1);
} else if (c1.getShipStrategy() == ShipStrategyType.FORWARD && c2.getShipStrategy() == ShipStrategyType.FORWARD) {
boolean adjustC1 = c1.getEstimatedOutputSize() <= 0 || c2.getEstimatedOutputSize() <= 0 ||
c1.getEstimatedOutputSize() <= c2.getEstimatedOutputSize();
if (adjustC1) {
c2 = c2.clone();
- p1.parameterizeChannel(c2, globalDopChange2);
+ p1.parameterizeChannel(c2, dopChange2);
} else {
c1 = c1.clone();
- p2.parameterizeChannel(c1, globalDopChange1);
+ p2.parameterizeChannel(c1, dopChange1);
}
} else {
// this should never happen, as it implies both realize a different strategy, which is
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BulkIterationNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BulkIterationNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BulkIterationNode.java
index f6720ea..bfbca15 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BulkIterationNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/BulkIterationNode.java
@@ -65,9 +65,9 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode
// --------------------------------------------------------------------------------------------
/**
- * Creates a new node with a single input for the optimizer plan.
+ * Creates a new node for the bulk iteration.
*
- * @param iteration The PACT that the node represents.
+ * @param iteration The bulk iteration the node represents.
*/
public BulkIterationNode(BulkIterationBase<?> iteration) {
super(iteration);
@@ -124,14 +124,12 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode
public void setNextPartialSolution(OptimizerNode nextPartialSolution, OptimizerNode terminationCriterion) {
// check if the root of the step function has the same DOP as the iteration
- if (nextPartialSolution.getDegreeOfParallelism() != getDegreeOfParallelism() ||
- nextPartialSolution.getSubtasksPerInstance() != getSubtasksPerInstance() )
+ if (nextPartialSolution.getDegreeOfParallelism() != getDegreeOfParallelism())
{
// add a no-op to the root to express the re-partitioning
NoOpNode noop = new NoOpNode();
noop.setDegreeOfParallelism(getDegreeOfParallelism());
- noop.setSubtasksPerInstance(getSubtasksPerInstance());
-
+
PactConnection noOpConn = new PactConnection(nextPartialSolution, noop);
noop.setIncomingConnection(noOpConn);
nextPartialSolution.addOutgoingConnection(noOpConn);
@@ -198,12 +196,7 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode
protected List<OperatorDescriptorSingle> getPossibleProperties() {
return Collections.<OperatorDescriptorSingle>singletonList(new NoOpDescriptor());
}
-
- @Override
- public boolean isMemoryConsumer() {
- return true;
- }
-
+
@Override
public void computeInterestingPropertiesForInputs(CostEstimator estimator) {
final InterestingProperties intProps = getInterestingProperties().clone();
@@ -306,12 +299,11 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode
else if (report == FeedbackPropertiesMeetRequirementsReport.NOT_MET) {
// attach a no-op node through which we create the properties of the original input
Channel toNoOp = new Channel(candidate);
- globPropsReq.parameterizeChannel(toNoOp, false, false);
+ globPropsReq.parameterizeChannel(toNoOp, false);
locPropsReq.parameterizeChannel(toNoOp);
UnaryOperatorNode rebuildPropertiesNode = new UnaryOperatorNode("Rebuild Partial Solution Properties", FieldList.EMPTY_LIST);
rebuildPropertiesNode.setDegreeOfParallelism(candidate.getDegreeOfParallelism());
- rebuildPropertiesNode.setSubtasksPerInstance(candidate.getSubtasksPerInstance());
SingleInputPlanNode rebuildPropertiesPlanNode = new SingleInputPlanNode(rebuildPropertiesNode, "Rebuild Partial Solution Properties", toNoOp, DriverStrategy.UNARY_NO_OP);
rebuildPropertiesPlanNode.initProperties(toNoOp.getGlobalProperties(), toNoOp.getLocalProperties());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSinkNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSinkNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSinkNode.java
index fe823d2..d4f9d67 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSinkNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSinkNode.java
@@ -87,11 +87,6 @@ public class DataSinkNode extends OptimizerNode {
}
@Override
- public boolean isMemoryConsumer() {
- return getPactContract().getPartitionOrdering() != null || getPactContract().getLocalOrder() != null;
- }
-
- @Override
public List<PactConnection> getIncomingConnections() {
return Collections.singletonList(this.input);
}
@@ -194,21 +189,16 @@ public class DataSinkNode extends OptimizerNode {
List<PlanNode> outputPlans = new ArrayList<PlanNode>();
final int dop = getDegreeOfParallelism();
- final int subPerInstance = getSubtasksPerInstance();
final int inDop = getPredecessorNode().getDegreeOfParallelism();
- final int inSubPerInstance = getPredecessorNode().getSubtasksPerInstance();
- final int numInstances = dop / subPerInstance + (dop % subPerInstance == 0 ? 0 : 1);
- final int inNumInstances = inDop / inSubPerInstance + (inDop % inSubPerInstance == 0 ? 0 : 1);
-
- final boolean globalDopChange = numInstances != inNumInstances;
- final boolean localDopChange = numInstances == inNumInstances & subPerInstance != inSubPerInstance;
-
+
+ final boolean dopChange = dop != inDop;
+
InterestingProperties ips = this.input.getInterestingProperties();
for (PlanNode p : subPlans) {
for (RequestedGlobalProperties gp : ips.getGlobalProperties()) {
for (RequestedLocalProperties lp : ips.getLocalProperties()) {
Channel c = new Channel(p);
- gp.parameterizeChannel(c, globalDopChange, localDopChange);
+ gp.parameterizeChannel(c, dopChange);
lp.parameterizeChannel(c);
c.setRequiredLocalProps(lp);
c.setRequiredGlobalProps(gp);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
index 17c11c9..7234420 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/DataSourceNode.java
@@ -55,7 +55,6 @@ public class DataSourceNode extends OptimizerNode {
if (NonParallelInput.class.isAssignableFrom(pactContract.getUserCodeWrapper().getUserCodeClass())) {
setDegreeOfParallelism(1);
- setSubtasksPerInstance(1);
this.sequentialInput = true;
} else {
this.sequentialInput = false;
@@ -78,27 +77,12 @@ public class DataSourceNode extends OptimizerNode {
}
@Override
- public boolean isMemoryConsumer() {
- return false;
- }
-
-
- @Override
public void setDegreeOfParallelism(int degreeOfParallelism) {
// if unsplittable, DOP remains at 1
if (!this.sequentialInput) {
super.setDegreeOfParallelism(degreeOfParallelism);
}
}
-
-
- @Override
- public void setSubtasksPerInstance(int instancesPerMachine) {
- // if unsplittable, DOP remains at 1
- if (!this.sequentialInput) {
- super.setSubtasksPerInstance(instancesPerMachine);
- }
- }
@Override
public List<PactConnection> getIncomingConnections() {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/GroupReduceNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/GroupReduceNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/GroupReduceNode.java
index 6eb2903..4d7230e 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/GroupReduceNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/GroupReduceNode.java
@@ -46,7 +46,6 @@ public class GroupReduceNode extends SingleInputNode {
if (this.keys == null) {
// case of a key-less reducer. force a parallelism of 1
setDegreeOfParallelism(1);
- setSubtasksPerInstance(1);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/OptimizerNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/OptimizerNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/OptimizerNode.java
index b2c9330..85a6568 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/OptimizerNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/OptimizerNode.java
@@ -262,13 +262,6 @@ public abstract class OptimizerNode implements Visitable<OptimizerNode>, Estimat
*/
@Override
public abstract void accept(Visitor<OptimizerNode> visitor);
-
- /**
- * Checks, whether this node requires memory for its tasks or not.
- *
- * @return True, if this node contains logic that requires memory usage, false otherwise.
- */
- public abstract boolean isMemoryConsumer();
/**
* Checks whether a field is modified by the user code or whether it is kept unchanged.
@@ -408,7 +401,7 @@ public abstract class OptimizerNode implements Visitable<OptimizerNode>, Estimat
* @param degreeOfParallelism
* The degree of parallelism to set.
* @throws IllegalArgumentException
- * If the degree of parallelism is smaller than one.
+ * If the degree of parallelism is smaller than one and not -1.
*/
public void setDegreeOfParallelism(int degreeOfParallelism) {
if (degreeOfParallelism < 1) {
@@ -416,48 +409,6 @@ public abstract class OptimizerNode implements Visitable<OptimizerNode>, Estimat
}
this.degreeOfParallelism = degreeOfParallelism;
}
-
- /**
- * Gets the number of parallel instances of the contract that are
- * to be executed on the same compute instance (logical machine).
- *
- * @return The number of subtask instances per machine.
- */
- public int getSubtasksPerInstance() {
- return this.subtasksPerInstance;
- }
-
- /**
- * Sets the number of parallel task instances of the contract that are
- * to be executed on the same computing instance (logical machine).
- *
- * @param instancesPerMachine The instances per machine.
- * @throws IllegalArgumentException If the number of instances per machine is smaller than one.
- */
- public void setSubtasksPerInstance(int instancesPerMachine) {
- if (instancesPerMachine < 1) {
- throw new IllegalArgumentException();
- }
- this.subtasksPerInstance = instancesPerMachine;
- }
-
- /**
- * Gets the minimal guaranteed memory per subtask for tasks represented by this OptimizerNode.
- *
- * @return The minimal guaranteed memory per subtask, in bytes.
- */
- public long getMinimalMemoryPerSubTask() {
- return this.minimalMemoryPerSubTask;
- }
-
- /**
- * Sets the minimal guaranteed memory per subtask for tasks represented by this OptimizerNode.
- *
- * @param minimalGuaranteedMemory The minimal guaranteed memory per subtask, in bytes.
- */
- public void setMinimalMemoryPerSubTask(long minimalGuaranteedMemory) {
- this.minimalMemoryPerSubTask = minimalGuaranteedMemory;
- }
/**
* Gets the amount of memory that all subtasks of this task have jointly available.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/ReduceNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/ReduceNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/ReduceNode.java
index 2190060..409d027 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/ReduceNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/ReduceNode.java
@@ -36,7 +36,6 @@ public class ReduceNode extends SingleInputNode {
if (this.keys == null) {
// case of a key-less reducer. force a parallelism of 1
setDegreeOfParallelism(1);
- setSubtasksPerInstance(1);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SingleInputNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SingleInputNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SingleInputNode.java
index 8bf3f16..0b872a7 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SingleInputNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SingleInputNode.java
@@ -206,22 +206,6 @@ public abstract class SingleInputNode extends OptimizerNode {
protected abstract List<OperatorDescriptorSingle> getPossibleProperties();
-
- @Override
- public boolean isMemoryConsumer() {
- for (OperatorDescriptorSingle dps : getPossibleProperties()) {
- if (dps.getStrategy().firstDam().isMaterializing()) {
- return true;
- }
- for (RequestedLocalProperties rlp : dps.getPossibleLocalProperties()) {
- if (!rlp.isTrivial()) {
- return true;
- }
- }
- }
- return false;
- }
-
@Override
public void computeInterestingPropertiesForInputs(CostEstimator estimator) {
// get what we inherit and what is preserved by our user code
@@ -284,30 +268,21 @@ public abstract class SingleInputNode extends OptimizerNode {
final ArrayList<PlanNode> outputPlans = new ArrayList<PlanNode>();
final int dop = getDegreeOfParallelism();
- final int subPerInstance = getSubtasksPerInstance();
final int inDop = getPredecessorNode().getDegreeOfParallelism();
- final int inSubPerInstance = getPredecessorNode().getSubtasksPerInstance();
- final int numInstances = dop / subPerInstance + (dop % subPerInstance == 0 ? 0 : 1);
- final int inNumInstances = inDop / inSubPerInstance + (inDop % inSubPerInstance == 0 ? 0 : 1);
-
- final boolean globalDopChange = numInstances != inNumInstances;
- final boolean localDopChange = numInstances == inNumInstances & subPerInstance != inSubPerInstance;
-
+
+ final boolean dopChange = inDop != dop;
+
// create all candidates
for (PlanNode child : subPlans) {
if (this.inConn.getShipStrategy() == null) {
// pick the strategy ourselves
for (RequestedGlobalProperties igps: intGlobal) {
final Channel c = new Channel(child, this.inConn.getMaterializationMode());
- igps.parameterizeChannel(c, globalDopChange, localDopChange);
+ igps.parameterizeChannel(c, dopChange);
// if the DOP changed, make sure that we cancel out properties, unless the
// ship strategy preserves/establishes them even under changing DOPs
- if (globalDopChange && !c.getShipStrategy().isNetworkStrategy()) {
- c.getGlobalProperties().reset();
- }
- if (localDopChange && !(c.getShipStrategy().isNetworkStrategy() ||
- c.getShipStrategy().compensatesForLocalDOPChanges())) {
+ if (dopChange && !c.getShipStrategy().isNetworkStrategy()) {
c.getGlobalProperties().reset();
}
@@ -332,12 +307,10 @@ public abstract class SingleInputNode extends OptimizerNode {
c.setShipStrategy(this.inConn.getShipStrategy());
}
- if (globalDopChange) {
+ if (dopChange) {
c.adjustGlobalPropertiesForFullParallelismChange();
- } else if (localDopChange) {
- c.adjustGlobalPropertiesForLocalParallelismChange();
}
-
+
// check whether we meet any of the accepted properties
for (RequestedGlobalProperties rgps: allValidGlobals) {
if (rgps.isMetBy(c.getGlobalProperties())) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SinkJoiner.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SinkJoiner.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SinkJoiner.java
index 2c765a5..a711ac5 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SinkJoiner.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/SinkJoiner.java
@@ -42,7 +42,6 @@ public class SinkJoiner extends TwoInputNode {
this.input2 = conn2;
setDegreeOfParallelism(1);
- setSubtasksPerInstance(1);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/86d206c4/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/TwoInputNode.java
----------------------------------------------------------------------
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/TwoInputNode.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/TwoInputNode.java
index 9898c81..97a92d0 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/TwoInputNode.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/dag/TwoInputNode.java
@@ -251,22 +251,6 @@ public abstract class TwoInputNode extends OptimizerNode {
}
protected abstract List<OperatorDescriptorDual> getPossibleProperties();
-
- @Override
- public boolean isMemoryConsumer() {
- for (OperatorDescriptorDual dpd : this.possibleProperties) {
- if (dpd.getStrategy().firstDam().isMaterializing() ||
- dpd.getStrategy().secondDam().isMaterializing()) {
- return true;
- }
- for (LocalPropertiesPair prp : dpd.getPossibleLocalProperties()) {
- if (!(prp.getProperties1().isTrivial() && prp.getProperties2().isTrivial())) {
- return true;
- }
- }
- }
- return false;
- }
@Override
public void computeInterestingPropertiesForInputs(CostEstimator estimator) {
@@ -348,20 +332,12 @@ public abstract class TwoInputNode extends OptimizerNode {
final ArrayList<PlanNode> outputPlans = new ArrayList<PlanNode>();
final int dop = getDegreeOfParallelism();
- final int subPerInstance = getSubtasksPerInstance();
- final int numInstances = dop / subPerInstance + (dop % subPerInstance == 0 ? 0 : 1);
final int inDop1 = getFirstPredecessorNode().getDegreeOfParallelism();
- final int inSubPerInstance1 = getFirstPredecessorNode().getSubtasksPerInstance();
- final int inNumInstances1 = inDop1 / inSubPerInstance1 + (inDop1 % inSubPerInstance1 == 0 ? 0 : 1);
final int inDop2 = getSecondPredecessorNode().getDegreeOfParallelism();
- final int inSubPerInstance2 = getSecondPredecessorNode().getSubtasksPerInstance();
- final int inNumInstances2 = inDop2 / inSubPerInstance2 + (inDop2 % inSubPerInstance2 == 0 ? 0 : 1);
-
- final boolean globalDopChange1 = numInstances != inNumInstances1;
- final boolean globalDopChange2 = numInstances != inNumInstances2;
- final boolean localDopChange1 = numInstances == inNumInstances1 & subPerInstance != inSubPerInstance1;
- final boolean localDopChange2 = numInstances == inNumInstances2 & subPerInstance != inSubPerInstance2;
-
+
+ final boolean dopChange1 = dop != inDop1;
+ final boolean dopChange2 = dop != inDop2;
+
// enumerate all pairwise combination of the children's plans together with
// all possible operator strategy combination
@@ -380,15 +356,11 @@ public abstract class TwoInputNode extends OptimizerNode {
final Channel c1 = new Channel(child1, this.input1.getMaterializationMode());
if (this.input1.getShipStrategy() == null) {
// free to choose the ship strategy
- igps1.parameterizeChannel(c1, globalDopChange1, localDopChange1);
+ igps1.parameterizeChannel(c1, dopChange1);
// if the DOP changed, make sure that we cancel out properties, unless the
// ship strategy preserves/establishes them even under changing DOPs
- if (globalDopChange1 && !c1.getShipStrategy().isNetworkStrategy()) {
- c1.getGlobalProperties().reset();
- }
- if (localDopChange1 && !(c1.getShipStrategy().isNetworkStrategy() ||
- c1.getShipStrategy().compensatesForLocalDOPChanges())) {
+ if (dopChange1 && !c1.getShipStrategy().isNetworkStrategy()) {
c1.getGlobalProperties().reset();
}
} else {
@@ -399,10 +371,8 @@ public abstract class TwoInputNode extends OptimizerNode {
c1.setShipStrategy(this.input1.getShipStrategy());
}
- if (globalDopChange1) {
+ if (dopChange1) {
c1.adjustGlobalPropertiesForFullParallelismChange();
- } else if (localDopChange1) {
- c1.adjustGlobalPropertiesForLocalParallelismChange();
}
}
@@ -411,15 +381,11 @@ public abstract class TwoInputNode extends OptimizerNode {
final Channel c2 = new Channel(child2, this.input2.getMaterializationMode());
if (this.input2.getShipStrategy() == null) {
// free to choose the ship strategy
- igps2.parameterizeChannel(c2, globalDopChange2, localDopChange2);
+ igps2.parameterizeChannel(c2, dopChange2);
// if the DOP changed, make sure that we cancel out properties, unless the
// ship strategy preserves/establishes them even under changing DOPs
- if (globalDopChange2 && !c2.getShipStrategy().isNetworkStrategy()) {
- c2.getGlobalProperties().reset();
- }
- if (localDopChange2 && !(c2.getShipStrategy().isNetworkStrategy() ||
- c2.getShipStrategy().compensatesForLocalDOPChanges())) {
+ if (dopChange2 && !c2.getShipStrategy().isNetworkStrategy()) {
c2.getGlobalProperties().reset();
}
} else {
@@ -430,10 +396,8 @@ public abstract class TwoInputNode extends OptimizerNode {
c2.setShipStrategy(this.input2.getShipStrategy());
}
- if (globalDopChange2) {
+ if (dopChange2) {
c2.adjustGlobalPropertiesForFullParallelismChange();
- } else if (localDopChange2) {
- c2.adjustGlobalPropertiesForLocalParallelismChange();
}
}
[20/22] Merge fix to omit input/output registering on JobManager
Rework Invokable Task Hierarchy
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
index f191df3..575454f 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/taskmanager/TaskManager.java
@@ -173,7 +173,7 @@ public class TaskManager implements TaskOperationProtocol {
}
- LOG.info("TaskManager started as user " + UserGroupInformation.getCurrentUser().getShortUserName());
+// LOG.info("TaskManager started as user " + UserGroupInformation.getCurrentUser().getShortUserName());
LOG.info("User system property: " + System.getProperty("user.name"));
LOG.info("Execution mode: " + executionMode);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java
deleted file mode 100644
index 88e4fcb..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInputTask.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.template;
-
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-import eu.stratosphere.core.io.InputSplit;
-
-/**
- * Abstract base class for tasks submitted as a part of a job input vertex.
- *
- * @param <T>
- * the type of input splits generated by this input task
- */
-public abstract class AbstractInputTask<T extends InputSplit> extends AbstractInvokable {
-
- /**
- * Returns an iterator to a (possible empty) list of input splits which is expected to be consumed by this
- * instance of the {@link AbstractInputTask}.
- *
- * @return an iterator to a (possible empty) list of input splits.
- */
- public Iterator<T> getInputSplits() {
-
- final InputSplitProvider provider = getEnvironment().getInputSplitProvider();
-
- return new Iterator<T>() {
-
- private T nextSplit;
-
- @Override
- public boolean hasNext() {
-
- if (this.nextSplit == null) {
-
- final InputSplit split = provider.getNextInputSplit();
- if (split != null) {
- @SuppressWarnings("unchecked")
- final T tSplit = (T) split;
- this.nextSplit = tSplit;
- return true;
- } else {
- return false;
- }
- } else {
- return true;
- }
- }
-
- @Override
- public T next() {
- if (this.nextSplit == null && !hasNext()) {
- throw new NoSuchElementException();
- }
-
- final T tmp = this.nextSplit;
- this.nextSplit = null;
- return tmp;
- }
-
- @Override
- public void remove() {
- throw new UnsupportedOperationException();
- }
- };
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
index 79390f8..792c1bf 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractInvokable.java
@@ -14,7 +14,6 @@
package eu.stratosphere.nephele.template;
import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.configuration.IllegalConfigurationException;
import eu.stratosphere.nephele.execution.Environment;
/**
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractOutputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractOutputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractOutputTask.java
deleted file mode 100644
index 13042d4..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractOutputTask.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.template;
-
-/**
- * Abstract base class for tasks submitted as a part of a job output vertex.
- *
- */
-public abstract class AbstractOutputTask extends AbstractInvokable {
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractTask.java
deleted file mode 100644
index 6d568ab..0000000
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/template/AbstractTask.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.template;
-
-/**
- * Abstract base class for tasks submitted as a part of a job task vertex.
- *
- */
-public abstract class AbstractTask extends AbstractInvokable {
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/io/FakeOutputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/io/FakeOutputTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/io/FakeOutputTask.java
index ced186b..f2944f4 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/io/FakeOutputTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/io/FakeOutputTask.java
@@ -13,14 +13,14 @@
package eu.stratosphere.pact.runtime.iterative.io;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.MutableRecordReader;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
import eu.stratosphere.types.Record;
/**
* Output task for the iteration tail
*/
-public class FakeOutputTask extends AbstractOutputTask {
+public class FakeOutputTask extends AbstractInvokable {
private MutableRecordReader<Record> reader;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationSynchronizationSinkTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationSynchronizationSinkTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationSynchronizationSinkTask.java
index 4e7286b..947872f 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationSynchronizationSinkTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationSynchronizationSinkTask.java
@@ -29,7 +29,7 @@ import eu.stratosphere.api.common.aggregators.ConvergenceCriterion;
import eu.stratosphere.nephele.event.task.AbstractTaskEvent;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
import eu.stratosphere.runtime.io.api.MutableRecordReader;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.nephele.types.IntegerRecord;
import eu.stratosphere.pact.runtime.iterative.event.AllWorkersDoneEvent;
import eu.stratosphere.pact.runtime.iterative.event.TerminationEvent;
@@ -44,7 +44,7 @@ import eu.stratosphere.types.Value;
* In each superstep, it simply waits until it has received a {@link WorkerDoneEvent} from each head and will send back
* an {@link AllWorkersDoneEvent} to signal that the next superstep can begin.
*/
-public class IterationSynchronizationSinkTask extends AbstractOutputTask implements Terminable {
+public class IterationSynchronizationSinkTask extends AbstractInvokable implements Terminable {
private static final Log log = LogFactory.getLog(IterationSynchronizationSinkTask.class);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationTailPactTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationTailPactTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationTailPactTask.java
index 859a62d..05b58e8 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationTailPactTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/iterative/task/IterationTailPactTask.java
@@ -97,7 +97,13 @@ public class IterationTailPactTask<S extends Function, OT> extends AbstractItera
log.info(formatLogString("starting iteration [" + currentIteration() + "]"));
}
- super.run();
+ try {
+ super.run();
+ }
+ catch (NullPointerException e) {
+ boolean terminationRequested = terminationRequested();
+ System.out.println("Nullpoint exception when termination requested was " + terminationRequested);
+ }
// check if termination was requested
checkForTerminationAndResetEndOfSuperstepState();
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
index cbe1766..7041679 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
@@ -16,26 +16,22 @@ package eu.stratosphere.pact.runtime.task;
import java.io.IOException;
import eu.stratosphere.pact.runtime.task.chaining.ExceptionInChainedStubException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import eu.stratosphere.api.common.io.FileOutputFormat;
-import eu.stratosphere.api.common.io.FileOutputFormat.OutputDirectoryMode;
import eu.stratosphere.api.common.io.OutputFormat;
import eu.stratosphere.api.common.typeutils.TypeComparatorFactory;
import eu.stratosphere.api.common.typeutils.TypeSerializer;
import eu.stratosphere.api.common.typeutils.TypeSerializerFactory;
import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.core.fs.FileSystem;
-import eu.stratosphere.core.fs.FileSystem.WriteMode;
-import eu.stratosphere.core.fs.Path;
import eu.stratosphere.core.io.IOReadableWritable;
import eu.stratosphere.nephele.execution.CancelTaskException;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.MutableReader;
import eu.stratosphere.runtime.io.api.MutableRecordReader;
import eu.stratosphere.runtime.io.api.MutableUnionRecordReader;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
import eu.stratosphere.pact.runtime.plugable.DeserializationDelegate;
import eu.stratosphere.pact.runtime.sort.UnilateralSortMerger;
import eu.stratosphere.pact.runtime.task.util.CloseableInputProvider;
@@ -51,7 +47,7 @@ import eu.stratosphere.util.MutableObjectIterator;
*
* @see OutputFormat
*/
-public class DataSinkTask<IT> extends AbstractOutputTask {
+public class DataSinkTask<IT> extends AbstractInvokable {
public static final String DEGREE_OF_PARALLELISM_KEY = "sink.dop";
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
index f835ace..62226d9 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
@@ -18,9 +18,11 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.NoSuchElementException;
import eu.stratosphere.pact.runtime.task.chaining.ExceptionInChainedStubException;
import eu.stratosphere.runtime.io.api.BufferWriter;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -32,7 +34,8 @@ import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.io.InputSplit;
import eu.stratosphere.nephele.execution.CancelTaskException;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
-import eu.stratosphere.nephele.template.AbstractInputTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
+import eu.stratosphere.nephele.template.InputSplitProvider;
import eu.stratosphere.pact.runtime.shipping.OutputCollector;
import eu.stratosphere.pact.runtime.shipping.RecordOutputCollector;
import eu.stratosphere.pact.runtime.task.chaining.ChainedCollectorMapDriver;
@@ -47,11 +50,11 @@ import eu.stratosphere.util.Collector;
*
* @see eu.stratosphere.api.common.io.InputFormat
*/
-public class DataSourceTask<OT> extends AbstractInputTask<InputSplit> {
+public class DataSourceTask<OT> extends AbstractInvokable {
- // Obtain DataSourceTask Logger
private static final Log LOG = LogFactory.getLog(DataSourceTask.class);
+
private List<BufferWriter> eventualOutputs;
// Output collector
@@ -76,11 +79,10 @@ public class DataSourceTask<OT> extends AbstractInputTask<InputSplit> {
@Override
- public void registerInputOutput()
- {
+ public void registerInputOutput() {
initInputFormat();
- if (LOG.isDebugEnabled())
+ if (LOG.isDebugEnabled()) {
LOG.debug(getLogString("Start registering input and output"));
}
@@ -331,7 +333,7 @@ l *
}
// get the factory for the type serializer
- this.serializerFactory = this.config.getOutputSerializer(cl);
+ this.serializerFactory = this.config.getOutputSerializer(this.userCodeClassLoader);
}
/**
@@ -343,49 +345,6 @@ l *
this.eventualOutputs = new ArrayList<BufferWriter>();
this.output = RegularPactTask.initOutputs(this, cl, this.config, this.chainedTasks, this.eventualOutputs);
}
-
- // ------------------------------------------------------------------------
- // Input Split creation
- // ------------------------------------------------------------------------
-
-
- @Override
- public InputSplit[] computeInputSplits(int requestedMinNumber) throws Exception {
- // we have to be sure that the format is instantiated at this point
- if (this.format == null) {
- throw new IllegalStateException("BUG: Input format hast not been instantiated, yet.");
- }
- return this.format.createInputSplits(requestedMinNumber);
- }
-
-
- @SuppressWarnings("unchecked")
- @Override
- public Class<InputSplit> getInputSplitType() {
- // we have to be sure that the format is instantiated at this point
- if (this.format == null) {
- throw new IllegalStateException("BUG: Input format hast not been instantiated, yet.");
- }
-
- return (Class<InputSplit>) this.format.getInputSplitType();
- }
-
- // ------------------------------------------------------------------------
- // Control of Parallelism
- // ------------------------------------------------------------------------
-
-
- @Override
- public int getMinimumNumberOfSubtasks() {
- return 1;
- }
-
-
- @Override
- public int getMaximumNumberOfSubtasks() {
- // since splits can in theory be arbitrarily small, we report a possible infinite number of subtasks.
- return -1;
- }
// ------------------------------------------------------------------------
// Utilities
@@ -413,4 +372,54 @@ l *
private String getLogString(String message, String taskName) {
return RegularPactTask.constructLogString(message, taskName, this);
}
+
+ private Iterator<InputSplit> getInputSplits() {
+
+ final InputSplitProvider provider = getEnvironment().getInputSplitProvider();
+
+ return new Iterator<InputSplit>() {
+
+ private InputSplit nextSplit;
+
+ private boolean exhausted;
+
+ @Override
+ public boolean hasNext() {
+ if (exhausted) {
+ return false;
+ }
+
+ if (nextSplit != null) {
+ return true;
+ }
+
+ InputSplit split = provider.getNextInputSplit();
+
+ if (split != null) {
+ this.nextSplit = split;
+ return true;
+ }
+ else {
+ exhausted = true;
+ return false;
+ }
+ }
+
+ @Override
+ public InputSplit next() {
+ if (this.nextSplit == null && !hasNext()) {
+ throw new NoSuchElementException();
+ }
+
+ final InputSplit tmp = this.nextSplit;
+ this.nextSplit = null;
+ return tmp;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
index 1d7c931..3140525 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/RegularPactTask.java
@@ -36,9 +36,7 @@ import eu.stratosphere.runtime.io.api.BufferWriter;
import eu.stratosphere.nephele.services.accumulators.AccumulatorEvent;
import eu.stratosphere.nephele.services.iomanager.IOManager;
import eu.stratosphere.nephele.services.memorymanager.MemoryManager;
-import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.nephele.template.AbstractTask;
import eu.stratosphere.pact.runtime.plugable.DeserializationDelegate;
import eu.stratosphere.pact.runtime.plugable.SerializationDelegate;
import eu.stratosphere.pact.runtime.resettable.SpillingResettableMutableObjectIterator;
@@ -73,7 +71,7 @@ import java.util.Map;
* The abstract base class for all tasks. Encapsulated common behavior and implements the main life-cycle
* of the user code.
*/
-public class RegularPactTask<S extends Function, OT> extends AbstractTask implements PactTaskContext<S, OT> {
+public class RegularPactTask<S extends Function, OT> extends AbstractInvokable implements PactTaskContext<S, OT> {
protected static final Log LOG = LogFactory.getLog(RegularPactTask.class);
@@ -1251,11 +1249,7 @@ public class RegularPactTask<S extends Function, OT> extends AbstractTask implem
oe = new RecordOutputEmitter(strategy, comparator, distribution);
}
- if (task instanceof AbstractTask) {
- writers.add(new RecordWriter<Record>((AbstractTask) task, oe));
- } else if (task instanceof AbstractInputTask<?>) {
- writers.add(new RecordWriter<Record>((AbstractInputTask<?>) task, oe));
- }
+ writers.add(new RecordWriter<Record>(task, oe));
}
if (eventualOutputs != null) {
eventualOutputs.addAll(writers);
@@ -1288,11 +1282,7 @@ public class RegularPactTask<S extends Function, OT> extends AbstractTask implem
oe = new OutputEmitter<T>(strategy, comparator, dataDist);
}
- if (task instanceof AbstractTask) {
- writers.add(new RecordWriter<SerializationDelegate<T>>((AbstractTask) task, oe));
- } else if (task instanceof AbstractInputTask<?>) {
- writers.add(new RecordWriter<SerializationDelegate<T>>((AbstractInputTask<?>) task, oe));
- }
+ writers.add(new RecordWriter<SerializationDelegate<T>>(task, oe));
}
if (eventualOutputs != null) {
eventualOutputs.addAll(writers);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
index 2eb003d..b44a489 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
@@ -1102,8 +1102,10 @@ public class TaskConfig {
/**
* A configuration that manages a subset of keys with a common prefix from a given configuration.
*/
- public static final class DelegatingConfiguration extends Configuration
- {
+ public static final class DelegatingConfiguration extends Configuration {
+
+ private static final long serialVersionUID = 1L;
+
private final Configuration backingConfig; // the configuration actually storing the data
private String prefix; // the prefix key by which keys for this config are marked
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/MutableRecordReader.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/MutableRecordReader.java b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/MutableRecordReader.java
index 9d03c7f..c54b542 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/MutableRecordReader.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/MutableRecordReader.java
@@ -17,8 +17,7 @@ import java.io.IOException;
import eu.stratosphere.core.io.IOReadableWritable;
import eu.stratosphere.runtime.io.gates.InputChannelResult;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
public class MutableRecordReader<T extends IOReadableWritable> extends AbstractSingleGateRecordReader<T> implements MutableReader<T> {
@@ -30,42 +29,9 @@ public class MutableRecordReader<T extends IOReadableWritable> extends AbstractS
*
* @param taskBase The application that instantiated the record reader.
*/
- public MutableRecordReader(final AbstractTask taskBase) {
+ public MutableRecordReader(AbstractInvokable taskBase) {
super(taskBase);
}
-
- /**
- * Constructs a new record reader and registers a new input gate with the application's environment.
- *
- * @param outputBase The application that instantiated the record reader.
- */
- public MutableRecordReader(final AbstractOutputTask outputBase) {
- super(outputBase);
- }
-
- /**
- * Constructs a new record reader and registers a new input gate with the application's environment.
- *
- * @param taskBase
- * the application that instantiated the record reader
- * @param inputGateID
- * The ID of the input gate that the reader reads from.
- */
- public MutableRecordReader(final AbstractTask taskBase, final int inputGateID) {
- super(taskBase);
- }
-
- /**
- * Constructs a new record reader and registers a new input gate with the application's environment.
- *
- * @param outputBase
- * the application that instantiated the record reader
- * @param inputGateID
- * The ID of the input gate that the reader reads from.
- */
- public MutableRecordReader(final AbstractOutputTask outputBase, final int inputGateID) {
- super(outputBase);
- }
// --------------------------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordReader.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordReader.java b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordReader.java
index bb6a580..5fc436c 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordReader.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordReader.java
@@ -14,8 +14,7 @@
package eu.stratosphere.runtime.io.api;
import eu.stratosphere.core.io.IOReadableWritable;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.gates.InputChannelResult;
import java.io.IOException;
@@ -50,23 +49,10 @@ public class RecordReader<T extends IOReadableWritable> extends AbstractSingleGa
* @param recordType
* The class of records that can be read from the record reader.
*/
- public RecordReader(AbstractTask taskBase, Class<T> recordType) {
+ public RecordReader(AbstractInvokable taskBase, Class<T> recordType) {
super(taskBase);
this.recordType = recordType;
}
-
- /**
- * Constructs a new record reader and registers a new input gate with the application's environment.
- *
- * @param outputBase
- * The application that instantiated the record reader.
- * @param recordType
- * The class of records that can be read from the record reader.
- */
- public RecordReader(AbstractOutputTask outputBase, Class<T> recordType) {
- super(outputBase);
- this.recordType = recordType;
- }
// --------------------------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordWriter.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordWriter.java b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordWriter.java
index 132dc14..a1ff62d 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordWriter.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/runtime/io/api/RecordWriter.java
@@ -15,9 +15,7 @@ package eu.stratosphere.runtime.io.api;
import eu.stratosphere.core.io.IOReadableWritable;
import eu.stratosphere.nephele.event.task.AbstractEvent;
-import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.nephele.template.AbstractInvokable;
-import eu.stratosphere.nephele.template.AbstractTask;
import eu.stratosphere.runtime.io.Buffer;
import eu.stratosphere.runtime.io.channels.EndOfSuperstepEvent;
import eu.stratosphere.runtime.io.network.bufferprovider.BufferProvider;
@@ -47,25 +45,11 @@ public class RecordWriter<T extends IOReadableWritable> extends BufferWriter {
// -----------------------------------------------------------------------------------------------------------------
- public RecordWriter(AbstractTask task) {
- this((AbstractInvokable) task, new RoundRobinChannelSelector<T>());
+ public RecordWriter(AbstractInvokable invokable) {
+ this(invokable, new RoundRobinChannelSelector<T>());
}
- public RecordWriter(AbstractTask task, ChannelSelector<T> channelSelector) {
- this((AbstractInvokable) task, channelSelector);
- }
-
- public RecordWriter(AbstractInputTask<?> task) {
- this((AbstractInvokable) task, new RoundRobinChannelSelector<T>());
- }
-
- public RecordWriter(AbstractInputTask<?> task, ChannelSelector<T> channelSelector) {
- this((AbstractInvokable) task, channelSelector);
- }
-
- // -----------------------------------------------------------------------------------------------------------------
-
- private RecordWriter(AbstractInvokable invokable, ChannelSelector<T> channelSelector) {
+ public RecordWriter(AbstractInvokable invokable, ChannelSelector<T> channelSelector) {
// initialize the gate
super(invokable);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
index fa0653b..2e75305 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ExecutionGraphTest.java
@@ -28,19 +28,21 @@ import org.apache.log4j.Level;
import org.junit.BeforeClass;
import org.junit.Test;
+import eu.stratosphere.api.java.io.DiscardingOuputFormat;
+import eu.stratosphere.api.java.io.TextInputFormat;
import eu.stratosphere.core.fs.Path;
import eu.stratosphere.nephele.execution.ExecutionState;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
import eu.stratosphere.nephele.jobgraph.DistributionPattern;
-import eu.stratosphere.nephele.jobgraph.JobFileInputVertex;
-import eu.stratosphere.nephele.jobgraph.JobFileOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
import eu.stratosphere.nephele.jobgraph.JobID;
+import eu.stratosphere.nephele.jobgraph.JobInputVertex;
+import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobTaskVertex;
-import eu.stratosphere.nephele.util.FileLineReader;
-import eu.stratosphere.nephele.util.FileLineWriter;
import eu.stratosphere.nephele.util.ServerTestUtils;
+import eu.stratosphere.pact.runtime.task.DataSinkTask;
+import eu.stratosphere.pact.runtime.task.DataSourceTask;
import eu.stratosphere.runtime.io.channels.ChannelType;
import eu.stratosphere.util.LogUtils;
@@ -49,6 +51,7 @@ import eu.stratosphere.util.LogUtils;
*
*/
public class ExecutionGraphTest {
+
@BeforeClass
public static void reduceLogLevel() {
LogUtils.initializeDefaultConsoleLogger(Level.WARN);
@@ -76,18 +79,21 @@ public class ExecutionGraphTest {
jobID = jg.getJobID();
// input vertex
- final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
- i1.setFilePath(new Path(inputFile.toURI()));
+ final JobInputVertex i1 = new JobInputVertex("Input 1", jg);
+ i1.setNumberOfSubtasks(1);
+ i1.setInvokableClass(DataSourceTask.class);
+ TextInputFormat inputFormat = new TextInputFormat(new Path(inputFile.toURI()));
+ i1.setInputFormat(inputFormat);
// task vertex
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
- t1.setTaskClass(ForwardTask1Input1Output.class);
+ t1.setInvokableClass(ForwardTask1Input1Output.class);
// output vertex
- final JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
- o1.setFilePath(new Path(new File(ServerTestUtils.getRandomFilename()).toURI()));
+ final JobOutputVertex o1 = new JobOutputVertex("Output 1", jg);
+ o1.setNumberOfSubtasks(1);
+ o1.setInvokableClass(DataSinkTask.class);
+ o1.setOutputFormat(new DiscardingOuputFormat<Object>());
o1.setVertexToShareInstancesWith(i1);
i1.setVertexToShareInstancesWith(t1);
@@ -171,7 +177,7 @@ public class ExecutionGraphTest {
assertEquals(0, egv0.getNumberOfBackwardLinks());
assertEquals(1, egv0.getNumberOfForwardLinks());
assertEquals(0, egv0.getStageNumber());
- assertEquals(-1, egv0.getUserDefinedNumberOfMembers());
+ assertEquals(1, egv0.getUserDefinedNumberOfMembers());
assertEquals("Task 1", egv0.getVertexToShareInstancesWith().getName());
// egv1 (output1)
@@ -189,7 +195,7 @@ public class ExecutionGraphTest {
assertEquals(1, egv1.getNumberOfBackwardLinks());
assertEquals(0, egv1.getNumberOfForwardLinks());
assertEquals(0, egv1.getStageNumber());
- assertEquals(-1, egv1.getUserDefinedNumberOfMembers());
+ assertEquals(1, egv1.getUserDefinedNumberOfMembers());
assertEquals("Input 1", egv1.getVertexToShareInstancesWith().getName());
// egv2 (task1)
@@ -278,18 +284,20 @@ public class ExecutionGraphTest {
jobID = jg.getJobID();
// input vertex
- final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
- i1.setFilePath(new Path(inputFile.toURI()));
+ final JobInputVertex i1 = new JobInputVertex("Input 1", jg);
+ i1.setInvokableClass(DataSourceTask.class);
+ i1.setInputFormat(new TextInputFormat(new Path(inputFile.toURI())));
+ i1.setNumberOfSubtasks(1);
// task vertex
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
- t1.setTaskClass(ForwardTask1Input1Output.class);
+ t1.setInvokableClass(ForwardTask1Input1Output.class);
// output vertex
- final JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
- o1.setFilePath(new Path(new File(ServerTestUtils.getRandomFilename()).toURI()));
+ final JobOutputVertex o1 = new JobOutputVertex("Output 1", jg);
+ o1.setNumberOfSubtasks(1);
+ o1.setInvokableClass(DataSinkTask.class);
+ o1.setOutputFormat(new DiscardingOuputFormat<Object>());
// connect vertices
i1.connectTo(t1, ChannelType.IN_MEMORY);
@@ -381,31 +389,32 @@ public class ExecutionGraphTest {
jobID = jg.getJobID();
// input vertex
- final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
- i1.setFilePath(new Path(inputFile1.toURI()));
+ final JobInputVertex i1 = new JobInputVertex("Input 1", jg);
+ i1.setInvokableClass(DataSourceTask.class);
+ i1.setInputFormat(new TextInputFormat(new Path(inputFile1.toURI())));
i1.setNumberOfSubtasks(2);
- final JobFileInputVertex i2 = new JobFileInputVertex("Input 2", jg);
- i2.setFileInputClass(FileLineReader.class);
- i2.setFilePath(new Path(inputFile2.toURI()));
+
+ final JobInputVertex i2 = new JobInputVertex("Input 2", jg);
+ i2.setInvokableClass(DataSourceTask.class);
+ i2.setInputFormat(new TextInputFormat(new Path(inputFile2.toURI())));
i2.setNumberOfSubtasks(2);
// task vertex
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
- t1.setTaskClass(ForwardTask1Input1Output.class);
+ t1.setInvokableClass(ForwardTask1Input1Output.class);
t1.setNumberOfSubtasks(2);
final JobTaskVertex t2 = new JobTaskVertex("Task 2", jg);
- t2.setTaskClass(ForwardTask1Input1Output.class);
+ t2.setInvokableClass(ForwardTask1Input1Output.class);
t2.setNumberOfSubtasks(2);
final JobTaskVertex t3 = new JobTaskVertex("Task 3", jg);
- t3.setTaskClass(ForwardTask2Inputs1Output.class);
+ t3.setInvokableClass(ForwardTask2Inputs1Output.class);
t3.setNumberOfSubtasks(2);
// output vertex
- final JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
- o1.setFilePath(new Path(outputFile.toURI()));
+ final JobOutputVertex o1 = new JobOutputVertex("Output 1", jg);
+ o1.setInvokableClass(DataSinkTask.class);
+ o1.setOutputFormat(new DiscardingOuputFormat<Object>());
o1.setNumberOfSubtasks(2);
i1.setVertexToShareInstancesWith(t1);
t1.setVertexToShareInstancesWith(t3);
@@ -624,35 +633,35 @@ public class ExecutionGraphTest {
jobID = jg.getJobID();
// input vertex
- final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
- i1.setFilePath(new Path(inputFile1.toURI()));
+ final JobInputVertex i1 = new JobInputVertex("Input 1", jg);
+ i1.setInvokableClass(DataSourceTask.class);
+ i1.setInputFormat(new TextInputFormat(new Path(inputFile1.toURI())));
i1.setNumberOfSubtasks(4);
- final JobFileInputVertex i2 = new JobFileInputVertex("Input 2", jg);
- i2.setFileInputClass(FileLineReader.class);
- i2.setFilePath(new Path(inputFile2.toURI()));
+ final JobInputVertex i2 = new JobInputVertex("Input 2", jg);
+ i2.setInvokableClass(DataSourceTask.class);
+ i2.setInputFormat(new TextInputFormat(new Path(inputFile2.toURI())));
i2.setNumberOfSubtasks(4);
// task vertex
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
- t1.setTaskClass(ForwardTask1Input1Output.class);
+ t1.setInvokableClass(ForwardTask1Input1Output.class);
t1.setNumberOfSubtasks(4);
final JobTaskVertex t2 = new JobTaskVertex("Task 2", jg);
- t2.setTaskClass(ForwardTask1Input1Output.class);
+ t2.setInvokableClass(ForwardTask1Input1Output.class);
t2.setNumberOfSubtasks(4);
final JobTaskVertex t3 = new JobTaskVertex("Task 3", jg);
- t3.setTaskClass(ForwardTask2Inputs1Output.class);
+ t3.setInvokableClass(ForwardTask2Inputs1Output.class);
t3.setNumberOfSubtasks(8);
final JobTaskVertex t4 = new JobTaskVertex("Task 4", jg);
- t4.setTaskClass(ForwardTask1Input2Outputs.class);
+ t4.setInvokableClass(ForwardTask1Input2Outputs.class);
t4.setNumberOfSubtasks(8);
// output vertex
- final JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
- o1.setFilePath(new Path(outputFile1.toURI()));
+ final JobOutputVertex o1 = new JobOutputVertex("Output 1", jg);
+ o1.setInvokableClass(DataSinkTask.class);
+ o1.setOutputFormat(new DiscardingOuputFormat<Object>());
o1.setNumberOfSubtasks(4);
- final JobFileOutputVertex o2 = new JobFileOutputVertex("Output 2", jg);
- o2.setFileOutputClass(FileLineWriter.class);
- o2.setFilePath(new Path(outputFile2.toURI()));
+ final JobOutputVertex o2 = new JobOutputVertex("Output 2", jg);
+ o2.setInvokableClass(DataSinkTask.class);
+ o2.setOutputFormat(new DiscardingOuputFormat<Object>());
o2.setNumberOfSubtasks(4);
o1.setVertexToShareInstancesWith(o2);
@@ -690,11 +699,8 @@ public class ExecutionGraphTest {
ev.updateExecutionState(ExecutionState.FINISHING);
ev.updateExecutionState(ExecutionState.FINISHED);
}
- } catch (GraphConversionException e) {
- fail(e.getMessage());
- } catch (JobGraphDefinitionException e) {
- fail(e.getMessage());
- } catch (IOException e) {
+ } catch (Exception e) {
+ e.printStackTrace();
fail(e.getMessage());
} finally {
if (inputFile1 != null) {
@@ -728,34 +734,33 @@ public class ExecutionGraphTest {
final String crossTaskName = "Self Cross Task";
final String outputTaskName = "Self Cross Output";
final int degreeOfParallelism = 4;
- File inputFile1 = null;
- File outputFile1 = null;
+ File inputFile = null;
+ File outputFile = null;
JobID jobID = null;
try {
-
- inputFile1 = ServerTestUtils.createInputFile(0);
- outputFile1 = new File(ServerTestUtils.getRandomFilename());
+ inputFile = ServerTestUtils.createInputFile(0);
+ outputFile = new File(ServerTestUtils.getRandomFilename());
// create job graph
final JobGraph jg = new JobGraph("Self Cross Test Job");
jobID = jg.getJobID();
// input vertex
- final JobFileInputVertex input = new JobFileInputVertex(inputTaskName, jg);
- input.setFileInputClass(SelfCrossInputTask.class);
- input.setFilePath(new Path(inputFile1.toURI()));
+ final JobInputVertex input = new JobInputVertex(inputTaskName, jg);
+ input.setInvokableClass(DataSourceTask.class);
+ input.setInputFormat(new TextInputFormat(new Path(inputFile.toURI())));
input.setNumberOfSubtasks(degreeOfParallelism);
// cross vertex
final JobTaskVertex cross = new JobTaskVertex(crossTaskName, jg);
- cross.setTaskClass(SelfCrossForwardTask.class);
+ cross.setInvokableClass(SelfCrossForwardTask.class);
cross.setNumberOfSubtasks(degreeOfParallelism);
// output vertex
- final JobFileOutputVertex output = new JobFileOutputVertex(outputTaskName, jg);
- output.setFileOutputClass(FileLineWriter.class);
- output.setFilePath(new Path(outputFile1.toURI()));
+ final JobOutputVertex output = new JobOutputVertex(outputTaskName, jg);
+ output.setInvokableClass(DataSinkTask.class);
+ output.setOutputFormat(new DiscardingOuputFormat<Object>());
output.setNumberOfSubtasks(degreeOfParallelism);
// connect vertices
@@ -835,11 +840,11 @@ public class ExecutionGraphTest {
} catch (IOException ioe) {
fail(ioe.getMessage());
} finally {
- if (inputFile1 != null) {
- inputFile1.delete();
+ if (inputFile != null) {
+ inputFile.delete();
}
- if (outputFile1 != null) {
- outputFile1.delete();
+ if (outputFile != null) {
+ outputFile.delete();
}
if (jobID != null) {
try {
@@ -872,30 +877,32 @@ public class ExecutionGraphTest {
jobID = jg.getJobID();
// input vertex
- final JobFileInputVertex input1 = new JobFileInputVertex("Input 1", jg);
- input1.setFileInputClass(FileLineReader.class);
- input1.setFilePath(new Path(inputFile1.toURI()));
+ final JobInputVertex input1 = new JobInputVertex("Input 1", jg);
+ input1.setInvokableClass(DataSourceTask.class);
+ input1.setInputFormat(new TextInputFormat(new Path(inputFile1.toURI())));
input1.setNumberOfSubtasks(degreeOfParallelism);
+
+
// forward vertex 1
final JobTaskVertex forward1 = new JobTaskVertex("Forward 1", jg);
- forward1.setTaskClass(ForwardTask1Input1Output.class);
+ forward1.setInvokableClass(ForwardTask1Input1Output.class);
forward1.setNumberOfSubtasks(degreeOfParallelism);
// forward vertex 2
final JobTaskVertex forward2 = new JobTaskVertex("Forward 2", jg);
- forward2.setTaskClass(ForwardTask1Input1Output.class);
+ forward2.setInvokableClass(ForwardTask1Input1Output.class);
forward2.setNumberOfSubtasks(degreeOfParallelism);
// forward vertex 3
final JobTaskVertex forward3 = new JobTaskVertex("Forward 3", jg);
- forward3.setTaskClass(ForwardTask1Input1Output.class);
+ forward3.setInvokableClass(ForwardTask1Input1Output.class);
forward3.setNumberOfSubtasks(degreeOfParallelism);
// output vertex
- final JobFileOutputVertex output1 = new JobFileOutputVertex("Output 1", jg);
- output1.setFileOutputClass(FileLineWriter.class);
- output1.setFilePath(new Path(outputFile1.toURI()));
+ final JobOutputVertex output1 = new JobOutputVertex("Output 1", jg);
+ output1.setInvokableClass(DataSinkTask.class);
+ output1.setOutputFormat(new DiscardingOuputFormat<Object>());
output1.setNumberOfSubtasks(degreeOfParallelism);
// connect vertices
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input1Output.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input1Output.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input1Output.java
index 0a2f52b..24f38b5 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input1Output.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input1Output.java
@@ -14,11 +14,11 @@
package eu.stratosphere.nephele.executiongraph;
import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractTask;
-public class ForwardTask1Input1Output extends AbstractTask {
+public class ForwardTask1Input1Output extends AbstractInvokable {
private RecordReader<StringRecord> input = null;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input2Outputs.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input2Outputs.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input2Outputs.java
index 5a5c325..370d0e4 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input2Outputs.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask1Input2Outputs.java
@@ -16,9 +16,9 @@ package eu.stratosphere.nephele.executiongraph;
import eu.stratosphere.core.io.StringRecord;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
-public class ForwardTask1Input2Outputs extends AbstractTask {
+public class ForwardTask1Input2Outputs extends AbstractInvokable {
private RecordReader<StringRecord> input = null;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask2Inputs1Output.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask2Inputs1Output.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask2Inputs1Output.java
index c87d093..b442dc6 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask2Inputs1Output.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/ForwardTask2Inputs1Output.java
@@ -14,11 +14,11 @@
package eu.stratosphere.nephele.executiongraph;
import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractTask;
-public class ForwardTask2Inputs1Output extends AbstractTask {
+public class ForwardTask2Inputs1Output extends AbstractInvokable {
private RecordReader<StringRecord> input1 = null;
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossForwardTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossForwardTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossForwardTask.java
index 05f181c..ac6aeb4 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossForwardTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossForwardTask.java
@@ -17,27 +17,20 @@ package eu.stratosphere.nephele.executiongraph;
import eu.stratosphere.core.io.StringRecord;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
/**
* This class represents the cross task in the self cross unit test.
- *
*/
-public class SelfCrossForwardTask extends AbstractTask {
-
+public class SelfCrossForwardTask extends AbstractInvokable {
@Override
public void registerInputOutput() {
-
new RecordReader<StringRecord>(this, StringRecord.class);
new RecordReader<StringRecord>(this, StringRecord.class);
new RecordWriter<StringRecord>(this);
}
-
@Override
- public void invoke() throws Exception {
-
- //Nothing to do here
- }
+ public void invoke() {}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java
new file mode 100644
index 0000000..0f24438
--- /dev/null
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java
@@ -0,0 +1,132 @@
+/***********************************************************************************************************************
+ * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ **********************************************************************************************************************/
+
+package eu.stratosphere.nephele.jobmanager;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import eu.stratosphere.core.fs.FSDataInputStream;
+import eu.stratosphere.core.fs.FileInputSplit;
+import eu.stratosphere.core.fs.FileSystem;
+import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
+import eu.stratosphere.nephele.template.InputSplitProvider;
+import eu.stratosphere.runtime.io.api.RecordWriter;
+import eu.stratosphere.runtime.fs.LineReader;
+
+public class DoubleSourceTask extends AbstractInvokable {
+
+ private RecordWriter<StringRecord> output1 = null;
+
+ private RecordWriter<StringRecord> output2 = null;
+
+ @Override
+ public void invoke() throws Exception {
+ this.output1.initializeSerializers();
+ this.output2.initializeSerializers();
+
+ final Iterator<FileInputSplit> splitIterator = getInputSplits();
+
+ while (splitIterator.hasNext()) {
+
+ final FileInputSplit split = splitIterator.next();
+
+ final long start = split.getStart();
+ final long length = split.getLength();
+
+ final FileSystem fs = FileSystem.get(split.getPath().toUri());
+
+ final FSDataInputStream fdis = fs.open(split.getPath());
+
+ final LineReader lineReader = new LineReader(fdis, start, length, (1024 * 1024));
+
+ byte[] line = lineReader.readLine();
+
+ while (line != null) {
+
+ // Create a string object from the data read
+ StringRecord str = new StringRecord();
+ str.set(line);
+
+ // Send out string
+ output1.emit(str);
+ output2.emit(str);
+
+ line = lineReader.readLine();
+ }
+
+ // Close the stream;
+ lineReader.close();
+ }
+
+ this.output1.flush();
+ this.output2.flush();
+ }
+
+ @Override
+ public void registerInputOutput() {
+ this.output1 = new RecordWriter<StringRecord>(this);
+ this.output2 = new RecordWriter<StringRecord>(this);
+ }
+
+ private Iterator<FileInputSplit> getInputSplits() {
+
+ final InputSplitProvider provider = getEnvironment().getInputSplitProvider();
+
+ return new Iterator<FileInputSplit>() {
+
+ private FileInputSplit nextSplit;
+
+ private boolean exhausted;
+
+ @Override
+ public boolean hasNext() {
+ if (exhausted) {
+ return false;
+ }
+
+ if (nextSplit != null) {
+ return true;
+ }
+
+ FileInputSplit split = (FileInputSplit) provider.getNextInputSplit();
+
+ if (split != null) {
+ this.nextSplit = split;
+ return true;
+ }
+ else {
+ exhausted = true;
+ return false;
+ }
+ }
+
+ @Override
+ public FileInputSplit next() {
+ if (this.nextSplit == null && !hasNext()) {
+ throw new NoSuchElementException();
+ }
+
+ final FileInputSplit tmp = this.nextSplit;
+ this.nextSplit = null;
+ return tmp;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
index a1ce0b2..5edfe0b 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
@@ -13,18 +13,18 @@
package eu.stratosphere.nephele.jobmanager;
+import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractTask;
-import eu.stratosphere.types.Record;
-public class DoubleTargetTask extends AbstractTask {
+public class DoubleTargetTask extends AbstractInvokable {
- private RecordReader<Record> input1 = null;
+ private RecordReader<StringRecord> input1 = null;
- private RecordReader<Record> input2 = null;
+ private RecordReader<StringRecord> input2 = null;
- private RecordWriter<Record> output = null;
+ private RecordWriter<StringRecord> output = null;
@Override
public void invoke() throws Exception {
@@ -33,13 +33,13 @@ public class DoubleTargetTask extends AbstractTask {
while (this.input1.hasNext()) {
- Record s = input1.next();
+ StringRecord s = input1.next();
this.output.emit(s);
}
while (this.input2.hasNext()) {
- Record s = input2.next();
+ StringRecord s = input2.next();
this.output.emit(s);
}
@@ -49,9 +49,9 @@ public class DoubleTargetTask extends AbstractTask {
@Override
public void registerInputOutput() {
- this.input1 = new RecordReader<Record>(this, Record.class);
- this.input2 = new RecordReader<Record>(this, Record.class);
- this.output = new RecordWriter<Record>(this);
+ this.input1 = new RecordReader<StringRecord>(this, StringRecord.class);
+ this.input2 = new RecordReader<StringRecord>(this, StringRecord.class);
+ this.output = new RecordWriter<StringRecord>(this);
}
-}
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java
index ffc4b42..e2e09c3 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionOutputFormat.java
@@ -13,41 +13,37 @@
package eu.stratosphere.nephele.jobmanager;
+import eu.stratosphere.api.common.io.InitializeOnMaster;
import eu.stratosphere.api.common.io.OutputFormat;
import eu.stratosphere.configuration.Configuration;
+import eu.stratosphere.core.io.StringRecord;
import java.io.IOException;
-public class ExceptionOutputFormat implements OutputFormat<Object> {
+public class ExceptionOutputFormat implements OutputFormat<StringRecord>, InitializeOnMaster {
+
+ private static final long serialVersionUID = 1L;
+
/**
* The message which is used for the test runtime exception.
*/
public static final String RUNTIME_EXCEPTION_MESSAGE = "This is a test runtime exception";
-
@Override
- public void configure(Configuration parameters) {
-
- }
+ public void configure(Configuration parameters) {}
@Override
- public void open(int taskNumber, int numTasks) throws IOException {
-
- }
+ public void open(int taskNumber, int numTasks) {}
@Override
- public void writeRecord(Object record) throws IOException {
-
- }
+ public void writeRecord(StringRecord record) {}
@Override
- public void close() throws IOException {
-
- }
+ public void close() {}
@Override
- public void initialize(Configuration configuration) {
+ public void initializeGlobal(int parallelism) throws IOException {
throw new RuntimeException(RUNTIME_EXCEPTION_MESSAGE);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionTask.java
index 77b4f96..9f4bcdf 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ExceptionTask.java
@@ -14,16 +14,15 @@
package eu.stratosphere.nephele.jobmanager;
import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractTask;
/**
* This task is used during the unit tests to generate a custom exception and check the proper response of the execution
* engine.
- *
*/
-public class ExceptionTask extends AbstractTask {
+public class ExceptionTask extends AbstractInvokable {
/**
* The test error message included in the thrown exception
@@ -52,20 +51,14 @@ public class ExceptionTask extends AbstractTask {
}
}
-
@Override
public void registerInputOutput() {
-
new RecordReader<StringRecord>(this, StringRecord.class);
new RecordWriter<StringRecord>(this);
}
-
@Override
public void invoke() throws Exception {
-
- // Throw the exception immediately
throw new TestException(ERROR_MESSAGE);
}
-
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
index 377e304..e85b5f1 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
@@ -13,15 +13,15 @@
package eu.stratosphere.nephele.jobmanager;
+import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractTask;
-import eu.stratosphere.types.Record;
-public class ForwardTask extends AbstractTask {
+public class ForwardTask extends AbstractInvokable {
- private RecordReader<Record> input = null;
- private RecordWriter<Record> output = null;
+ private RecordReader<StringRecord> input = null;
+ private RecordWriter<StringRecord> output = null;
@Override
public void invoke() throws Exception {
@@ -30,7 +30,7 @@ public class ForwardTask extends AbstractTask {
while (this.input.hasNext()) {
- Record s = input.next();
+ StringRecord s = input.next();
this.output.emit(s);
}
@@ -39,7 +39,7 @@ public class ForwardTask extends AbstractTask {
@Override
public void registerInputOutput() {
- this.input = new RecordReader<Record>(this, Record.class);
- this.output = new RecordWriter<Record>(this);
+ this.input = new RecordReader<StringRecord>(this, StringRecord.class);
+ this.output = new RecordWriter<StringRecord>(this);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
index db2d9af..2549d4f 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/JobManagerITCase.java
@@ -13,6 +13,25 @@
package eu.stratosphere.nephele.jobmanager;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import eu.stratosphere.api.common.io.OutputFormat;
+import eu.stratosphere.api.common.operators.util.UserCodeObjectWrapper;
import eu.stratosphere.configuration.ConfigConstants;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.configuration.GlobalConfiguration;
@@ -22,37 +41,24 @@ import eu.stratosphere.nephele.client.JobClient;
import eu.stratosphere.nephele.client.JobExecutionException;
import eu.stratosphere.nephele.execution.RuntimeEnvironment;
import eu.stratosphere.nephele.jobgraph.DistributionPattern;
-import eu.stratosphere.runtime.io.channels.ChannelType;
-import eu.stratosphere.nephele.jobgraph.JobFileInputVertex;
-import eu.stratosphere.nephele.jobgraph.JobFileOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
+import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobTaskVertex;
import eu.stratosphere.nephele.taskmanager.Task;
import eu.stratosphere.nephele.taskmanager.TaskManager;
-import eu.stratosphere.nephele.util.FileLineReader;
-import eu.stratosphere.nephele.util.FileLineWriter;
import eu.stratosphere.nephele.util.JarFileCreator;
import eu.stratosphere.nephele.util.ServerTestUtils;
+import eu.stratosphere.nephele.util.tasks.DoubleSourceTask;
+import eu.stratosphere.nephele.util.tasks.FileLineReader;
+import eu.stratosphere.nephele.util.tasks.FileLineWriter;
+import eu.stratosphere.nephele.util.tasks.JobFileInputVertex;
+import eu.stratosphere.nephele.util.tasks.JobFileOutputVertex;
+import eu.stratosphere.pact.runtime.task.DataSinkTask;
+import eu.stratosphere.pact.runtime.task.util.TaskConfig;
+import eu.stratosphere.runtime.io.channels.ChannelType;
import eu.stratosphere.util.LogUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
/**
* This test is intended to cover the basic functionality of the {@link JobManager}.
*/
@@ -170,23 +176,23 @@ public class JobManagerITCase {
// input vertex
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
+ i1.setInvokableClass(FileLineReader.class);
i1.setFilePath(new Path(new File(testDirectory).toURI()));
i1.setNumberOfSubtasks(1);
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
- t1.setTaskClass(ForwardTask.class);
+ t1.setInvokableClass(ForwardTask.class);
t1.setNumberOfSubtasks(1);
// task vertex 2
final JobTaskVertex t2 = new JobTaskVertex("Task 2", jg);
- t2.setTaskClass(ForwardTask.class);
+ t2.setInvokableClass(ForwardTask.class);
t2.setNumberOfSubtasks(1);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
o1.setNumberOfSubtasks(1);
@@ -282,16 +288,16 @@ public class JobManagerITCase {
// input vertex
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
+ i1.setInvokableClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile.toURI()));
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task with Exception", jg);
- t1.setTaskClass(ExceptionTask.class);
+ t1.setInvokableClass(ExceptionTask.class);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
t1.setVertexToShareInstancesWith(i1);
@@ -330,10 +336,9 @@ public class JobManagerITCase {
fail("Expected exception but did not receive it");
- } catch (JobGraphDefinitionException jgde) {
- fail(jgde.getMessage());
- } catch (IOException ioe) {
- fail(ioe.getMessage());
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
} finally {
// Remove temporary files
@@ -376,16 +381,16 @@ public class JobManagerITCase {
// input vertex
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
+ i1.setInvokableClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile.toURI()));
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task with Exception", jg);
- t1.setTaskClass(RuntimeExceptionTask.class);
+ t1.setInvokableClass(RuntimeExceptionTask.class);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
t1.setVertexToShareInstancesWith(i1);
@@ -472,32 +477,28 @@ public class JobManagerITCase {
final JobGraph jg = new JobGraph("Job Graph for Exception Test");
// input vertex
- final JobInputVertex i1 = new JobInputVertex("Input 1", jg);
+ final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
+ i1.setInvokableClass(FileLineReader.class);
+ i1.setFilePath(new Path(inputFile.toURI()));
i1.setNumberOfSubtasks(1);
- Class<AbstractInputTask<?>> clazz = (Class<AbstractInputTask<?>>)(Class<?>)DataSourceTask
- .class;
- i1.setInputClass(clazz);
- TextInputFormat inputFormat = new TextInputFormat();
- inputFormat.setFilePath(new Path(inputFile.toURI()));
- i1.setInputFormat(inputFormat);
- i1.setInputFormat(inputFormat);
- i1.setOutputSerializer(RecordSerializerFactory.get());
- TaskConfig config= new TaskConfig(i1.getConfiguration());
- config.addOutputShipStrategy(ShipStrategyType.FORWARD);
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task with Exception", jg);
- t1.setTaskClass(ForwardTask.class);
+ t1.setInvokableClass(ForwardTask.class);
// output vertex
JobOutputVertex o1 = new JobOutputVertex("Output 1", jg);
o1.setNumberOfSubtasks(1);
- o1.setOutputClass(DataSinkTask.class);
+ o1.setInvokableClass(DataSinkTask.class);
ExceptionOutputFormat outputFormat = new ExceptionOutputFormat();
o1.setOutputFormat(outputFormat);
TaskConfig outputConfig = new TaskConfig(o1.getConfiguration());
- outputConfig.addInputToGroup(0);
- outputConfig.setInputSerializer(RecordSerializerFactory.get(), 0);
+ outputConfig.setStubWrapper(new UserCodeObjectWrapper<OutputFormat<?>>(outputFormat));
+// outputConfig.addInputToGroup(0);
+//
+// ValueSerializer<StringRecord> serializer = new ValueSerializer<StringRecord>(StringRecord.class);
+// RuntimeStatefulSerializerFactory<StringRecord> serializerFactory = new RuntimeStatefulSerializerFactory<StringRecord>(serializer, StringRecord.class);
+// outputConfig.setInputSerializer(serializerFactory, 0);
t1.setVertexToShareInstancesWith(i1);
o1.setVertexToShareInstancesWith(i1);
@@ -591,23 +592,23 @@ public class JobManagerITCase {
// input vertex
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
+ i1.setInvokableClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile.toURI()));
i1.setNumberOfSubtasks(1);
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task 1", jg);
- t1.setTaskClass(ForwardTask.class);
+ t1.setInvokableClass(ForwardTask.class);
t1.setNumberOfSubtasks(1);
// task vertex 2
final JobTaskVertex t2 = new JobTaskVertex("Task 2", jg);
- t2.setTaskClass(ForwardTask.class);
+ t2.setInvokableClass(ForwardTask.class);
t2.setNumberOfSubtasks(1);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
o1.setNumberOfSubtasks(1);
@@ -620,8 +621,9 @@ public class JobManagerITCase {
i1.connectTo(t1, ChannelType.NETWORK);
t1.connectTo(t2, ChannelType.IN_MEMORY);
t2.connectTo(o1, ChannelType.IN_MEMORY);
- } catch (JobGraphDefinitionException e) {
+ } catch (Exception e) {
e.printStackTrace();
+ fail(e.getMessage());
}
// add jar
@@ -693,16 +695,16 @@ public class JobManagerITCase {
// input vertex
final JobFileInputVertex i1 = new JobFileInputVertex("Input with two Outputs", jg);
- i1.setFileInputClass(DoubleSourceTask.class);
+ i1.setInvokableClass(DoubleSourceTask.class);
i1.setFilePath(new Path(inputFile.toURI()));
// task vertex 1
final JobTaskVertex t1 = new JobTaskVertex("Task with two Inputs", jg);
- t1.setTaskClass(DoubleTargetTask.class);
+ t1.setInvokableClass(DoubleTargetTask.class);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output 1", jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
t1.setVertexToShareInstancesWith(i1);
@@ -720,12 +722,9 @@ public class JobManagerITCase {
jobClient = new JobClient(jg, configuration);
jobClient.submitJobAndWait();
- } catch (JobExecutionException e) {
+ } catch (Exception e) {
+ e.printStackTrace();
fail(e.getMessage());
- } catch (JobGraphDefinitionException jgde) {
- fail(jgde.getMessage());
- } catch (IOException ioe) {
- fail(ioe.getMessage());
} finally {
// Remove temporary files
@@ -772,12 +771,12 @@ public class JobManagerITCase {
// input vertex
final JobFileInputVertex i1 = new JobFileInputVertex(jg);
- i1.setFileInputClass(FileLineReader.class);
+ i1.setInvokableClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile.toURI()));
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex(jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
o1.setVertexToShareInstancesWith(i1);
@@ -791,13 +790,9 @@ public class JobManagerITCase {
// Create job client and launch job
jobClient = new JobClient(jg, configuration);
jobClient.submitJobAndWait();
-
- } catch (JobExecutionException e) {
+ } catch (Exception e) {
+ e.printStackTrace();
fail(e.getMessage());
- } catch (JobGraphDefinitionException jgde) {
- fail(jgde.getMessage());
- } catch (IOException ioe) {
- fail(ioe.getMessage());
} finally {
// Remove temporary files
@@ -855,21 +850,21 @@ public class JobManagerITCase {
// input vertex 1
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
+ i1.setInvokableClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile1.toURI()));
// input vertex 2
final JobFileInputVertex i2 = new JobFileInputVertex("Input 2", jg);
- i2.setFileInputClass(FileLineReader.class);
+ i2.setInvokableClass(FileLineReader.class);
i2.setFilePath(new Path(inputFile2.toURI()));
// union task
final JobTaskVertex u1 = new JobTaskVertex("Union", jg);
- u1.setTaskClass(UnionTask.class);
+ u1.setInvokableClass(UnionTask.class);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output", jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
o1.setNumberOfSubtasks(1);
@@ -999,24 +994,24 @@ public class JobManagerITCase {
// input vertex 1
final JobFileInputVertex i1 = new JobFileInputVertex("Input 1", jg);
- i1.setFileInputClass(FileLineReader.class);
+ i1.setInvokableClass(FileLineReader.class);
i1.setFilePath(new Path(inputFile1.toURI()));
i1.setNumberOfSubtasks(numberOfSubtasks);
// input vertex 2
final JobFileInputVertex i2 = new JobFileInputVertex("Input 2", jg);
- i2.setFileInputClass(FileLineReader.class);
+ i2.setInvokableClass(FileLineReader.class);
i2.setFilePath(new Path(inputFile2.toURI()));
i2.setNumberOfSubtasks(numberOfSubtasks);
// union task
final JobTaskVertex f1 = new JobTaskVertex("Forward 1", jg);
- f1.setTaskClass(DoubleTargetTask.class);
+ f1.setInvokableClass(DoubleTargetTask.class);
f1.setNumberOfSubtasks(numberOfSubtasks);
// output vertex
JobFileOutputVertex o1 = new JobFileOutputVertex("Output", jg);
- o1.setFileOutputClass(FileLineWriter.class);
+ o1.setInvokableClass(FileLineWriter.class);
o1.setFilePath(new Path(outputFile.toURI()));
o1.setNumberOfSubtasks(numberOfSubtasks);
@@ -1051,6 +1046,9 @@ public class JobManagerITCase {
} catch (JobExecutionException e) {
// Job execution should lead to an error due to lack of resources
return;
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
}
finally {
tmLogger.setLevel(tmLevel);
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/RuntimeExceptionTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/RuntimeExceptionTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/RuntimeExceptionTask.java
index 9376099..ce20431 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/RuntimeExceptionTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/RuntimeExceptionTask.java
@@ -13,13 +13,12 @@
package eu.stratosphere.nephele.jobmanager;
-import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
/**
* This task throws a {@link RuntimeException} when the method <code>registerInputOutput</code> is called.
- *
*/
-public class RuntimeExceptionTask extends AbstractTask {
+public class RuntimeExceptionTask extends AbstractInvokable {
/**
* The message which is used for the test runtime exception.
@@ -29,15 +28,9 @@ public class RuntimeExceptionTask extends AbstractTask {
@Override
public void registerInputOutput() {
-
throw new RuntimeException(RUNTIME_EXCEPTION_MESSAGE);
}
-
@Override
- public void invoke() throws Exception {
-
- // Nothing to do here
- }
-
+ public void invoke() {}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
index 209eff1..f21c60e 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
@@ -14,36 +14,34 @@
package eu.stratosphere.nephele.jobmanager;
import eu.stratosphere.core.io.StringRecord;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.MutableRecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
import eu.stratosphere.runtime.io.api.UnionRecordReader;
-import eu.stratosphere.nephele.template.AbstractTask;
-import eu.stratosphere.types.Record;
/**
* A simple implementation of a task using a {@link UnionRecordReader}.
*/
-public class UnionTask extends AbstractTask {
+public class UnionTask extends AbstractInvokable {
/**
* The union record reader to be used during the tests.
*/
- private UnionRecordReader<Record> unionReader;
+ private UnionRecordReader<StringRecord> unionReader;
- private RecordWriter<Record> writer;
+ private RecordWriter<StringRecord> writer;
@Override
public void registerInputOutput() {
@SuppressWarnings("unchecked")
- MutableRecordReader<Record>[] recordReaders = (MutableRecordReader<Record>[]) new
- MutableRecordReader<?>[2];
- recordReaders[0] = new MutableRecordReader<Record>(this);
- recordReaders[1] = new MutableRecordReader<Record>(this);
- this.unionReader = new UnionRecordReader<Record>(recordReaders, Record.class);
+ MutableRecordReader<StringRecord>[] recordReaders = (MutableRecordReader<StringRecord>[]) new MutableRecordReader<?>[2];
+ recordReaders[0] = new MutableRecordReader<StringRecord>(this);
+ recordReaders[1] = new MutableRecordReader<StringRecord>(this);
+ this.unionReader = new UnionRecordReader<StringRecord>(recordReaders, StringRecord.class);
- this.writer = new RecordWriter<Record>(this);
+ this.writer = new RecordWriter<StringRecord>(this);
}
@Override
@@ -56,4 +54,4 @@ public class UnionTask extends AbstractTask {
this.writer.flush();
}
-}
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/8c1d82a8/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
index 6a41fe9..e5cabb8 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
@@ -24,6 +24,9 @@ import eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler;
import org.junit.Test;
+import eu.stratosphere.api.common.io.GenericInputFormat;
+import eu.stratosphere.api.common.io.OutputFormat;
+import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.core.io.StringRecord;
import eu.stratosphere.nephele.execution.ExecutionState;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
@@ -35,63 +38,41 @@ import eu.stratosphere.nephele.jobgraph.JobGraphDefinitionException;
import eu.stratosphere.nephele.jobgraph.JobInputVertex;
import eu.stratosphere.nephele.jobgraph.JobOutputVertex;
import eu.stratosphere.nephele.jobmanager.scheduler.SchedulingException;
-import eu.stratosphere.nephele.template.AbstractGenericInputTask;
-import eu.stratosphere.nephele.template.AbstractOutputTask;
+import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
import eu.stratosphere.runtime.io.channels.ChannelType;
+import eu.stratosphere.types.IntValue;
import eu.stratosphere.util.StringUtils;
/**
- * This class checks the functionality of the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} class
+ * This class checks the functionality of the {@link eu.stratosphere.nephele.jobmanager.scheduler.DefaultScheduler} class
*/
+@SuppressWarnings("serial")
public class DefaultSchedulerTest {
- /**
- * Test input task.
- *
- */
- public static final class InputTask extends AbstractGenericInputTask {
- /**
- * {@inheritDoc}
- */
+ public static final class InputTask extends AbstractInvokable {
+
@Override
public void registerInputOutput() {
new RecordWriter<StringRecord>(this);
}
- /**
- * {@inheritDoc}
- */
@Override
- public void invoke() throws Exception {
- // Nothing to do here
- }
+ public void invoke() throws Exception {}
}
- /**
- * Test output task.
- *
- */
- public static final class OutputTask extends AbstractOutputTask {
+ public static final class OutputTask extends AbstractInvokable {
- /**
- * {@inheritDoc}
- */
@Override
public void registerInputOutput() {
new RecordReader<StringRecord>(this, StringRecord.class);
}
- /**
- * {@inheritDoc}
- */
@Override
- public void invoke() throws Exception {
- // Nothing to do here
- }
+ public void invoke() throws Exception {}
}
@@ -111,29 +92,16 @@ public class DefaultSchedulerTest {
public static final class DummyOutputFormat implements OutputFormat<IntValue> {
@Override
- public void configure(Configuration parameters) {
-
- }
+ public void configure(Configuration parameters) {}
@Override
- public void open(int taskNumber, int numTasks) throws IOException {
-
- }
-
- @Override
- public void writeRecord(IntValue record) throws IOException {
-
- }
+ public void open(int taskNumber, int numTasks) {}
@Override
- public void close() throws IOException {
-
- }
+ public void writeRecord(IntValue record) {}
@Override
- public void initialize(Configuration configuration) {
-
- }
+ public void close() {}
}
/**
@@ -148,12 +116,12 @@ public class DefaultSchedulerTest {
final JobGraph jobGraph = new JobGraph("Job Graph");
final JobInputVertex inputVertex = new JobInputVertex("Input 1", jobGraph);
- inputVertex.setInputClass(InputTask.class);
+ inputVertex.setInvokableClass(InputTask.class);
inputVertex.setInputFormat(new DummyInputFormat());
inputVertex.setNumberOfSubtasks(1);
final JobOutputVertex outputVertex = new JobOutputVertex("Output 1", jobGraph);
- outputVertex.setOutputClass(OutputTask.class);
+ outputVertex.setInvokableClass(OutputTask.class);
outputVertex.setOutputFormat(new DummyOutputFormat());
outputVertex.setNumberOfSubtasks(1);
[13/22] Removed RuntimeEnvironment instantiation from execution graph
construction. Removed legacy job vertex classes and input/output tasks.
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
index 82359f5..cbe1766 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSinkTask.java
@@ -351,64 +351,6 @@ public class DataSinkTask<IT> extends AbstractOutputTask {
throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
}
}
-
- // ------------------------------------------------------------------------
- // Degree of parallelism & checks
- // ------------------------------------------------------------------------
-
-
- @Override
- public int getMaximumNumberOfSubtasks() {
- if (!(this.format instanceof FileOutputFormat<?>)) {
- return -1;
- }
-
- final FileOutputFormat<?> fileOutputFormat = (FileOutputFormat<?>) this.format;
-
- // ----------------- This code applies only to file inputs ------------------
-
- final Path path = fileOutputFormat.getOutputFilePath();
- final WriteMode writeMode = fileOutputFormat.getWriteMode();
- final OutputDirectoryMode outDirMode = fileOutputFormat.getOutputDirectoryMode();
-
- // Prepare output path and determine max DOP
- try {
-
- int dop = getTaskConfiguration().getInteger(DEGREE_OF_PARALLELISM_KEY, -1);
- final FileSystem fs = path.getFileSystem();
-
- if(dop == 1 && outDirMode == OutputDirectoryMode.PARONLY) {
- // output is not written in parallel and should be written to a single file.
-
- if(fs.isDistributedFS()) {
- // prepare distributed output path
- if(!fs.initOutPathDistFS(path, writeMode, false)) {
- // output preparation failed! Cancel task.
- throw new IOException("Output path could not be initialized.");
- }
- }
-
- return 1;
-
- } else {
- // output should be written to a directory
-
- if(fs.isDistributedFS()) {
- // only distributed file systems can be initialized at start-up time.
- if(!fs.initOutPathDistFS(path, writeMode, true)) {
- throw new IOException("Output directory could not be created.");
- }
- }
-
- return -1;
-
- }
- }
- catch (IOException e) {
- LOG.error("Could not access the file system to detemine the status of the output.", e);
- throw new RuntimeException("I/O Error while accessing file", e);
- }
- }
// ------------------------------------------------------------------------
// Utilities
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
index af176b9..f835ace 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/DataSourceTask.java
@@ -78,27 +78,12 @@ public class DataSourceTask<OT> extends AbstractInputTask<InputSplit> {
@Override
public void registerInputOutput()
{
- if (LOG.isDebugEnabled()) {
+ initInputFormat();
+
+ if (LOG.isDebugEnabled()) {
LOG.debug(getLogString("Start registering input and output"));
}
- if (this.userCodeClassLoader == null) {
- try {
- this.userCodeClassLoader = LibraryCacheManager.getClassLoader(getEnvironment().getJobID());
- }
- catch (IOException ioe) {
- throw new RuntimeException("Usercode ClassLoader could not be obtained for job: " +
- getEnvironment().getJobID(), ioe);
- }
- }
-
- // obtain task configuration (including stub parameters)
- Configuration taskConf = getTaskConfiguration();
- taskConf.setClassLoader(this.userCodeClassLoader);
- this.config = new TaskConfig(taskConf);
-
- initInputFormat(this.userCodeClassLoader);
-
try {
initOutputs(this.userCodeClassLoader);
} catch (Exception ex) {
@@ -301,17 +286,42 @@ public class DataSourceTask<OT> extends AbstractInputTask<InputSplit> {
/**
* Initializes the InputFormat implementation and configuration.
- *
+ *
* @throws RuntimeException
* Throws if instance of InputFormat implementation can not be
* obtained.
*/
- private void initInputFormat(ClassLoader cl) {
- // instantiate the stub
- @SuppressWarnings("unchecked")
- Class<InputFormat<OT, InputSplit>> superClass = (Class<InputFormat<OT, InputSplit>>) (Class<?>) InputFormat.class;
- this.format = RegularPactTask.instantiateUserCode(this.config, cl, superClass);
-
+ private void initInputFormat() {
+ if (this.userCodeClassLoader == null) {
+ try {
+ this.userCodeClassLoader = LibraryCacheManager.getClassLoader(getEnvironment().getJobID());
+ }
+ catch (IOException ioe) {
+ throw new RuntimeException("Usercode ClassLoader could not be obtained for job: " +
+ getEnvironment().getJobID(), ioe);
+ }
+ }
+
+ // obtain task configuration (including stub parameters)
+ Configuration taskConf = getTaskConfiguration();
+ taskConf.setClassLoader(this.userCodeClassLoader);
+ this.config = new TaskConfig(taskConf);
+
+ try {
+ this.format = config.<InputFormat<OT, InputSplit>>getStubWrapper(this.userCodeClassLoader)
+ .getUserCodeObject(InputFormat.class, this.userCodeClassLoader);
+
+ // check if the class is a subclass, if the check is required
+ if (!InputFormat.class.isAssignableFrom(this.format.getClass())) {
+ throw new RuntimeException("The class '" + this.format.getClass().getName() + "' is not a subclass of '" +
+ InputFormat.class.getName() + "' as is required.");
+ }
+ }
+ catch (ClassCastException ccex) {
+ throw new RuntimeException("The stub class is not a proper subclass of " + InputFormat.class.getName(),
+ ccex);
+ }
+
// configure the stub. catch exceptions here extra, to report them as originating from the user code
try {
this.format.configure(this.config.getStubParameters());
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
index a43f8cc..2eb003d 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/pact/runtime/task/util/TaskConfig.java
@@ -246,6 +246,10 @@ public class TaskConfig {
public String getTaskName() {
return this.config.getString(TASK_NAME, null);
}
+
+ public boolean hasStubWrapper() {
+ return this.config.containsKey(STUB_OBJECT);
+ }
public void setStubWrapper(UserCodeWrapper<?> wrapper) {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/deployment/TaskDeploymentDescriptorTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/deployment/TaskDeploymentDescriptorTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/deployment/TaskDeploymentDescriptorTest.java
index 7000667..dc2e605 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/deployment/TaskDeploymentDescriptorTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/deployment/TaskDeploymentDescriptorTest.java
@@ -19,12 +19,12 @@ import static org.junit.Assert.fail;
import java.io.IOException;
+import eu.stratosphere.pact.runtime.task.RegularPactTask;
import org.junit.Test;
import eu.stratosphere.configuration.Configuration;
import eu.stratosphere.nephele.execution.librarycache.LibraryCacheManager;
import eu.stratosphere.nephele.executiongraph.ExecutionVertexID;
-import eu.stratosphere.nephele.util.FileLineReader;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.nephele.util.SerializableArrayList;
@@ -50,7 +50,7 @@ public class TaskDeploymentDescriptorTest {
final int currentNumberOfSubtasks = 1;
final Configuration jobConfiguration = new Configuration();
final Configuration taskConfiguration = new Configuration();
- final Class<? extends AbstractInvokable> invokableClass = FileLineReader.class;
+ final Class<? extends AbstractInvokable> invokableClass = RegularPactTask.class;
final SerializableArrayList<GateDeploymentDescriptor> outputGates = new SerializableArrayList<GateDeploymentDescriptor>(
0);
final SerializableArrayList<GateDeploymentDescriptor> inputGates = new SerializableArrayList<GateDeploymentDescriptor>(
@@ -85,7 +85,7 @@ public class TaskDeploymentDescriptorTest {
final int currentNumberOfSubtasks = 1;
final Configuration jobConfiguration = new Configuration();
final Configuration taskConfiguration = new Configuration();
- final Class<? extends AbstractInvokable> invokableClass = FileLineReader.class;
+ final Class<? extends AbstractInvokable> invokableClass = RegularPactTask.class;
final SerializableArrayList<GateDeploymentDescriptor> outputGates = new SerializableArrayList<GateDeploymentDescriptor>(
0);
final SerializableArrayList<GateDeploymentDescriptor> inputGates = new SerializableArrayList<GateDeploymentDescriptor>(
@@ -239,7 +239,7 @@ public class TaskDeploymentDescriptorTest {
final int currentNumberOfSubtasks = 1;
final Configuration jobConfiguration = new Configuration();
final Configuration taskConfiguration = new Configuration();
- final Class<? extends AbstractInvokable> invokableClass = FileLineReader.class;
+ final Class<? extends AbstractInvokable> invokableClass = RegularPactTask.class;
final SerializableArrayList<GateDeploymentDescriptor> outputGates = new SerializableArrayList<GateDeploymentDescriptor>(
0);
final SerializableArrayList<GateDeploymentDescriptor> inputGates = new SerializableArrayList<GateDeploymentDescriptor>(
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossInputTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossInputTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossInputTask.java
deleted file mode 100644
index 1ce23e6..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/executiongraph/SelfCrossInputTask.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.executiongraph;
-
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractFileInputTask;
-
-/**
- * This class represents the data source in the self cross unit test.
- *
- */
-public class SelfCrossInputTask extends AbstractFileInputTask {
-
-
- @Override
- public void registerInputOutput() {
-
- new RecordWriter<StringRecord>(this);
- new RecordWriter<StringRecord>(this);
- }
-
-
- @Override
- public void invoke() throws Exception {
-
- // Nothing to do here
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java
deleted file mode 100644
index 1e2be47..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleSourceTask.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.jobmanager;
-
-import java.util.Iterator;
-
-import eu.stratosphere.core.fs.FSDataInputStream;
-import eu.stratosphere.core.fs.FileInputSplit;
-import eu.stratosphere.core.fs.FileSystem;
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractFileInputTask;
-import eu.stratosphere.runtime.fs.LineReader;
-
-public class DoubleSourceTask extends AbstractFileInputTask {
-
- private RecordWriter<StringRecord> output1 = null;
-
- private RecordWriter<StringRecord> output2 = null;
-
- @Override
- public void invoke() throws Exception {
- this.output1.initializeSerializers();
- this.output2.initializeSerializers();
-
- final Iterator<FileInputSplit> splitIterator = getFileInputSplits();
-
- while (splitIterator.hasNext()) {
-
- final FileInputSplit split = splitIterator.next();
-
- final long start = split.getStart();
- final long length = split.getLength();
-
- final FileSystem fs = FileSystem.get(split.getPath().toUri());
-
- final FSDataInputStream fdis = fs.open(split.getPath());
-
- final LineReader lineReader = new LineReader(fdis, start, length, (1024 * 1024));
-
- byte[] line = lineReader.readLine();
-
- while (line != null) {
-
- // Create a string object from the data read
- StringRecord str = new StringRecord();
- str.set(line);
-
- // Send out string
- output1.emit(str);
- output2.emit(str);
-
- line = lineReader.readLine();
- }
-
- // Close the stream;
- lineReader.close();
- }
-
- this.output1.flush();
- this.output2.flush();
- }
-
- @Override
- public void registerInputOutput() {
- this.output1 = new RecordWriter<StringRecord>(this);
- this.output2 = new RecordWriter<StringRecord>(this);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
index f0ca435..a1ce0b2 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/DoubleTargetTask.java
@@ -13,18 +13,18 @@
package eu.stratosphere.nephele.jobmanager;
-import eu.stratosphere.core.io.StringRecord;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.types.Record;
public class DoubleTargetTask extends AbstractTask {
- private RecordReader<StringRecord> input1 = null;
+ private RecordReader<Record> input1 = null;
- private RecordReader<StringRecord> input2 = null;
+ private RecordReader<Record> input2 = null;
- private RecordWriter<StringRecord> output = null;
+ private RecordWriter<Record> output = null;
@Override
public void invoke() throws Exception {
@@ -33,13 +33,13 @@ public class DoubleTargetTask extends AbstractTask {
while (this.input1.hasNext()) {
- StringRecord s = input1.next();
+ Record s = input1.next();
this.output.emit(s);
}
while (this.input2.hasNext()) {
- StringRecord s = input2.next();
+ Record s = input2.next();
this.output.emit(s);
}
@@ -49,9 +49,9 @@ public class DoubleTargetTask extends AbstractTask {
@Override
public void registerInputOutput() {
- this.input1 = new RecordReader<StringRecord>(this, StringRecord.class);
- this.input2 = new RecordReader<StringRecord>(this, StringRecord.class);
- this.output = new RecordWriter<StringRecord>(this);
+ this.input1 = new RecordReader<Record>(this, Record.class);
+ this.input2 = new RecordReader<Record>(this, Record.class);
+ this.output = new RecordWriter<Record>(this);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
index 96be668..377e304 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/ForwardTask.java
@@ -13,15 +13,15 @@
package eu.stratosphere.nephele.jobmanager;
-import eu.stratosphere.core.io.StringRecord;
import eu.stratosphere.runtime.io.api.RecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.types.Record;
public class ForwardTask extends AbstractTask {
- private RecordReader<StringRecord> input = null;
- private RecordWriter<StringRecord> output = null;
+ private RecordReader<Record> input = null;
+ private RecordWriter<Record> output = null;
@Override
public void invoke() throws Exception {
@@ -30,7 +30,7 @@ public class ForwardTask extends AbstractTask {
while (this.input.hasNext()) {
- StringRecord s = input.next();
+ Record s = input.next();
this.output.emit(s);
}
@@ -39,7 +39,7 @@ public class ForwardTask extends AbstractTask {
@Override
public void registerInputOutput() {
- this.input = new RecordReader<StringRecord>(this, StringRecord.class);
- this.output = new RecordWriter<StringRecord>(this);
+ this.input = new RecordReader<Record>(this, Record.class);
+ this.output = new RecordWriter<Record>(this);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
index 124a24d..209eff1 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/UnionTask.java
@@ -18,6 +18,7 @@ import eu.stratosphere.runtime.io.api.MutableRecordReader;
import eu.stratosphere.runtime.io.api.RecordWriter;
import eu.stratosphere.runtime.io.api.UnionRecordReader;
import eu.stratosphere.nephele.template.AbstractTask;
+import eu.stratosphere.types.Record;
/**
* A simple implementation of a task using a {@link UnionRecordReader}.
@@ -27,21 +28,22 @@ public class UnionTask extends AbstractTask {
/**
* The union record reader to be used during the tests.
*/
- private UnionRecordReader<StringRecord> unionReader;
+ private UnionRecordReader<Record> unionReader;
- private RecordWriter<StringRecord> writer;
+ private RecordWriter<Record> writer;
@Override
public void registerInputOutput() {
@SuppressWarnings("unchecked")
- MutableRecordReader<StringRecord>[] recordReaders = (MutableRecordReader<StringRecord>[]) new MutableRecordReader<?>[2];
- recordReaders[0] = new MutableRecordReader<StringRecord>(this);
- recordReaders[1] = new MutableRecordReader<StringRecord>(this);
- this.unionReader = new UnionRecordReader<StringRecord>(recordReaders, StringRecord.class);
+ MutableRecordReader<Record>[] recordReaders = (MutableRecordReader<Record>[]) new
+ MutableRecordReader<?>[2];
+ recordReaders[0] = new MutableRecordReader<Record>(this);
+ recordReaders[1] = new MutableRecordReader<Record>(this);
+ this.unionReader = new UnionRecordReader<Record>(recordReaders, Record.class);
- this.writer = new RecordWriter<StringRecord>(this);
+ this.writer = new RecordWriter<Record>(this);
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
index c8bcddc..6a41fe9 100644
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
+++ b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/jobmanager/scheduler/queue/DefaultSchedulerTest.java
@@ -95,6 +95,47 @@ public class DefaultSchedulerTest {
}
+ public static final class DummyInputFormat extends GenericInputFormat<IntValue> {
+
+ @Override
+ public boolean reachedEnd() throws IOException {
+ return true;
+ }
+
+ @Override
+ public IntValue nextRecord(IntValue reuse) throws IOException {
+ return null;
+ }
+ }
+
+ public static final class DummyOutputFormat implements OutputFormat<IntValue> {
+
+ @Override
+ public void configure(Configuration parameters) {
+
+ }
+
+ @Override
+ public void open(int taskNumber, int numTasks) throws IOException {
+
+ }
+
+ @Override
+ public void writeRecord(IntValue record) throws IOException {
+
+ }
+
+ @Override
+ public void close() throws IOException {
+
+ }
+
+ @Override
+ public void initialize(Configuration configuration) {
+
+ }
+ }
+
/**
* Constructs a sample execution graph consisting of two vertices connected by a channel of the given type.
*
@@ -108,10 +149,12 @@ public class DefaultSchedulerTest {
final JobInputVertex inputVertex = new JobInputVertex("Input 1", jobGraph);
inputVertex.setInputClass(InputTask.class);
+ inputVertex.setInputFormat(new DummyInputFormat());
inputVertex.setNumberOfSubtasks(1);
final JobOutputVertex outputVertex = new JobOutputVertex("Output 1", jobGraph);
outputVertex.setOutputClass(OutputTask.class);
+ outputVertex.setOutputFormat(new DummyOutputFormat());
outputVertex.setNumberOfSubtasks(1);
try {
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineReader.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineReader.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineReader.java
deleted file mode 100644
index fcb4fa1..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineReader.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.util;
-
-import java.util.Iterator;
-
-import eu.stratosphere.core.fs.FSDataInputStream;
-import eu.stratosphere.core.fs.FileInputSplit;
-import eu.stratosphere.core.fs.FileSystem;
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.template.AbstractFileInputTask;
-import eu.stratosphere.runtime.fs.LineReader;
-
-/**
- * A file line reader reads the associated file input splits line by line and outputs the lines as string records.
- *
- */
-public class FileLineReader extends AbstractFileInputTask {
-
- private RecordWriter<StringRecord> output = null;
-
- @Override
- public void invoke() throws Exception {
-
- output.initializeSerializers();
-
- final Iterator<FileInputSplit> splitIterator = getFileInputSplits();
-
- while (splitIterator.hasNext()) {
-
- final FileInputSplit split = splitIterator.next();
-
- long start = split.getStart();
- long length = split.getLength();
-
- final FileSystem fs = FileSystem.get(split.getPath().toUri());
-
- final FSDataInputStream fdis = fs.open(split.getPath());
-
- final LineReader lineReader = new LineReader(fdis, start, length, (1024 * 1024));
-
- byte[] line = lineReader.readLine();
-
- while (line != null) {
-
- // Create a string object from the data read
- StringRecord str = new StringRecord();
- str.set(line);
-
- // Send out string
- output.emit(str);
-
- line = lineReader.readLine();
- }
-
- // Close the stream;
- lineReader.close();
- }
-
- this.output.flush();
- }
-
- @Override
- public void registerInputOutput() {
- output = new RecordWriter<StringRecord>(this);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineWriter.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineWriter.java b/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineWriter.java
deleted file mode 100644
index bc738df..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/nephele/util/FileLineWriter.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.nephele.util;
-
-import eu.stratosphere.core.fs.FSDataOutputStream;
-import eu.stratosphere.core.fs.FileStatus;
-import eu.stratosphere.core.fs.FileSystem;
-import eu.stratosphere.core.fs.Path;
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.runtime.io.api.RecordReader;
-import eu.stratosphere.nephele.template.AbstractFileOutputTask;
-
-/**
- * A file line writer reads string records its input gate and writes them to the associated output file.
- *
- */
-public class FileLineWriter extends AbstractFileOutputTask {
-
- /**
- * The record reader through which incoming string records are received.
- */
- private RecordReader<StringRecord> input = null;
-
-
- @Override
- public void invoke() throws Exception {
-
- Path outputPath = getFileOutputPath();
-
- FileSystem fs = FileSystem.get(outputPath.toUri());
- if (fs.exists(outputPath)) {
- FileStatus status = fs.getFileStatus(outputPath);
-
- if (status.isDir()) {
- outputPath = new Path(outputPath.toUri().toString() + "/file_" + getIndexInSubtaskGroup() + ".txt");
- }
- }
-
- final FSDataOutputStream outputStream = fs.create(outputPath, true);
-
- while (this.input.hasNext()) {
-
- StringRecord record = this.input.next();
- byte[] recordByte = (record.toString() + "\r\n").getBytes();
- outputStream.write(recordByte, 0, recordByte.length);
- }
-
- outputStream.close();
-
- }
-
-
- @Override
- public void registerInputOutput() {
- this.input = new RecordReader<StringRecord>(this, StringRecord.class);
- }
-
-
- @Override
- public int getMaximumNumberOfSubtasks() {
- // The default implementation always returns -1
- return -1;
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/library/FileLineReadWriteTest.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/library/FileLineReadWriteTest.java b/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/library/FileLineReadWriteTest.java
deleted file mode 100644
index 17c2f58..0000000
--- a/stratosphere-runtime/src/test/java/eu/stratosphere/runtime/io/library/FileLineReadWriteTest.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/***********************************************************************************************************************
- * Copyright (C) 2010-2013 by the Stratosphere project (http://stratosphere.eu)
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- **********************************************************************************************************************/
-
-package eu.stratosphere.runtime.io.library;
-
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import static org.powermock.api.mockito.PowerMockito.whenNew;
-
-import java.io.File;
-import java.io.IOException;
-
-import eu.stratosphere.runtime.io.api.RecordWriter;
-import eu.stratosphere.nephele.util.FileLineReader;
-import eu.stratosphere.nephele.util.FileLineWriter;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-import org.powermock.reflect.Whitebox;
-
-import eu.stratosphere.configuration.Configuration;
-import eu.stratosphere.core.fs.FileInputSplit;
-import eu.stratosphere.core.fs.Path;
-import eu.stratosphere.core.io.StringRecord;
-import eu.stratosphere.nephele.execution.Environment;
-import eu.stratosphere.runtime.io.api.RecordReader;
-import eu.stratosphere.nephele.template.InputSplitProvider;
-
-/**
- * This class checks the functionality of the {@link eu.stratosphere.nephele.util.FileLineReader} and the {@link eu.stratosphere.nephele.util.FileLineWriter} class.
- *
- */
-@RunWith(PowerMockRunner.class)
-@PrepareForTest(FileLineReader.class)
-public class FileLineReadWriteTest {
-
- @Mock
- private Environment environment;
-
- @Mock
- private Configuration conf;
-
- @Mock
- private RecordReader<StringRecord> recordReader;
-
- @Mock
- private RecordWriter<StringRecord> recordWriter;
-
- @Mock
- private InputSplitProvider inputSplitProvider;
-
- private File file = new File("./tmp");
-
- /**
- * Set up mocks
- *
- * @throws IOException
- */
- @Before
- public void before() throws Exception {
-
- MockitoAnnotations.initMocks(this);
- }
-
- /**
- * remove the temporary file
- */
- @After
- public void after() {
- this.file.delete();
- }
-
- /**
- * Tests the read and write methods
- *
- * @throws Exception
- */
- @Test
- public void testReadWrite() throws Exception {
-
- this.file.createNewFile();
- FileLineWriter writer = new FileLineWriter();
- Whitebox.setInternalState(writer, "environment", this.environment);
- Whitebox.setInternalState(writer, "input", this.recordReader);
- when(this.environment.getTaskConfiguration()).thenReturn(this.conf);
-
- when(this.conf.getString("outputPath", null)).thenReturn(this.file.toURI().toString());
- when(this.recordReader.hasNext()).thenReturn(true, true, true, false);
- StringRecord in = new StringRecord("abc");
- try {
- when(this.recordReader.next()).thenReturn(in);
- } catch (IOException e) {
- fail();
- e.printStackTrace();
- } catch (InterruptedException e) {
- fail();
- e.printStackTrace();
- }
- writer.invoke();
-
- final FileInputSplit split = new FileInputSplit(0, new Path(this.file.toURI().toString()), 0,
- this.file.length(), null);
- when(this.environment.getInputSplitProvider()).thenReturn(this.inputSplitProvider);
- when(this.inputSplitProvider.getNextInputSplit()).thenReturn(split, (FileInputSplit) null);
-
- FileLineReader reader = new FileLineReader();
- Whitebox.setInternalState(reader, "environment", this.environment);
- Whitebox.setInternalState(reader, "output", this.recordWriter);
- StringRecord record = mock(StringRecord.class);
-
- whenNew(StringRecord.class).withNoArguments().thenReturn(record);
-
- reader.invoke();
-
- // verify the correct bytes have been written and read
- verify(record, times(3)).set(in.getBytes());
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/ea79186b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
----------------------------------------------------------------------
diff --git a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
index 57253d1..3de547e 100644
--- a/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
+++ b/stratosphere-tests/src/test/java/eu/stratosphere/test/recordJobs/util/DiscardingOutputFormat.java
@@ -44,4 +44,7 @@ public class DiscardingOutputFormat implements OutputFormat<Record> {
@Override
public void close() throws IOException
{}
+
+ @Override
+ public void initialize(Configuration configuration){}
}
[18/22] git commit: Streamlined job graph algorithms to get rid of
linear contains operations.
Posted by se...@apache.org.
Streamlined job graph algorithms to get rid of linear contains operations.
Project: http://git-wip-us.apache.org/repos/asf/incubator-flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-flink/commit/e52fcf90
Tree: http://git-wip-us.apache.org/repos/asf/incubator-flink/tree/e52fcf90
Diff: http://git-wip-us.apache.org/repos/asf/incubator-flink/diff/e52fcf90
Branch: refs/heads/master
Commit: e52fcf90c37f921f50cd75dfcb7960d2f37c5e74
Parents: fba44a9
Author: Till Rohrmann <ti...@gmail.com>
Authored: Wed Apr 9 16:18:44 2014 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Sun Jun 22 21:07:20 2014 +0200
----------------------------------------------------------------------
.../stratosphere/nephele/jobgraph/JobGraph.java | 120 +++++++------------
1 file changed, 45 insertions(+), 75 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-flink/blob/e52fcf90/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
----------------------------------------------------------------------
diff --git a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
index 804a258..f048b0d 100644
--- a/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
+++ b/stratosphere-runtime/src/main/java/eu/stratosphere/nephele/jobgraph/JobGraph.java
@@ -92,6 +92,11 @@ public class JobGraph implements IOReadableWritable {
private static final int BUFFERSIZE = 8192;
/**
+ * Buffer for array of reachable job vertices
+ */
+ private volatile AbstractJobVertex[] bufferedAllReachableJobVertices = null;
+
+ /**
* Constructs a new job graph with a random job ID.
*/
public JobGraph() {
@@ -253,14 +258,51 @@ public class JobGraph implements IOReadableWritable {
/**
* Returns an array of all job vertices than can be reached when traversing the job graph from the input vertices.
+ * Each job vertex is contained only one time.
*
* @return an array of all job vertices than can be reached when traversing the job graph from the input vertices
*/
public AbstractJobVertex[] getAllReachableJobVertices() {
+ if(bufferedAllReachableJobVertices == null){
+ final List<AbstractJobVertex> collector = new ArrayList<AbstractJobVertex>();
+ final HashSet<JobVertexID> visited = new HashSet<JobVertexID>();
+
+ final Iterator<AbstractJobInputVertex> inputs = getInputVertices();
+
+ while(inputs.hasNext()){
+ AbstractJobVertex vertex = inputs.next();
+
+ if(!visited.contains(vertex.getID())){
+ collectVertices(vertex, visited, collector);
+ }
+ }
+
+ bufferedAllReachableJobVertices = collector.toArray(new AbstractJobVertex[0]);
+ }
+
+ return bufferedAllReachableJobVertices;
+ }
+
+ /**
+ * Auxiliary method to collect all vertices which are reachable from the input vertices.
+ *
+ * @param jv
+ * the currently considered job vertex
+ * @param collector
+ * a temporary list to store the vertices that have already been visisted
+ */
+ private void collectVertices(final AbstractJobVertex jv, final HashSet<JobVertexID> visited, final
+ List<AbstractJobVertex> collector) {
+ visited.add(jv.getID());
+ collector.add(jv);
- final Vector<AbstractJobVertex> collector = new Vector<AbstractJobVertex>();
- collectVertices(null, collector);
- return collector.toArray(new AbstractJobVertex[0]);
+ for(int i =0; i < jv.getNumberOfForwardConnections(); i++){
+ AbstractJobVertex vertex = jv.getForwardConnection(i).getConnectedVertex();
+
+ if(!visited.contains(vertex.getID())){
+ collectVertices(vertex, visited, collector);
+ }
+ }
}
/**
@@ -293,34 +335,6 @@ public class JobGraph implements IOReadableWritable {
return vertices;
}
- /**
- * Auxiliary method to collect all vertices which are reachable from the input vertices.
- *
- * @param jv
- * the currently considered job vertex
- * @param collector
- * a temporary list to store the vertices that have already been visisted
- */
- private void collectVertices(final AbstractJobVertex jv, final List<AbstractJobVertex> collector) {
-
- if (jv == null) {
- final Iterator<AbstractJobInputVertex> iter = getInputVertices();
- while (iter.hasNext()) {
- collectVertices(iter.next(), collector);
- }
- } else {
-
- if (!collector.contains(jv)) {
- collector.add(jv);
- } else {
- return;
- }
-
- for (int i = 0; i < jv.getNumberOfForwardConnections(); i++) {
- collectVertices(jv.getForwardConnection(i).getConnectedVertex(), collector);
- }
- }
- }
/**
* Returns the ID of the job.
@@ -356,31 +370,6 @@ public class JobGraph implements IOReadableWritable {
}
/**
- * Checks if the job vertex with the given ID is registered with the job graph.
- *
- * @param id
- * the ID of the vertex to search for
- * @return <code>true</code> if a vertex with the given ID is registered with the job graph, <code>false</code>
- * otherwise.
- */
- private boolean includedInJobGraph(final JobVertexID id) {
-
- if (this.inputVertices.containsKey(id)) {
- return true;
- }
-
- if (this.outputVertices.containsKey(id)) {
- return true;
- }
-
- if (this.taskVertices.containsKey(id)) {
- return true;
- }
-
- return false;
- }
-
- /**
* Checks if the job graph is weakly connected.
*
* @return <code>true</code> if the job graph is weakly connected, otherwise <code>false</code>
@@ -395,25 +384,6 @@ public class JobGraph implements IOReadableWritable {
return false;
}
- final HashMap<JobVertexID, AbstractJobVertex> tmp = new HashMap<JobVertexID, AbstractJobVertex>();
- for (int i = 0; i < reachable.length; i++) {
- tmp.put(reachable[i].getID(), reachable[i]);
- }
-
- // Check if all is subset of reachable
- for (int i = 0; i < all.length; i++) {
- if (!tmp.containsKey(all[i].getID())) {
- return false;
- }
- }
-
- // Check if reachable is a subset of all
- for (int i = 0; i < reachable.length; i++) {
- if (!includedInJobGraph(reachable[i].getID())) {
- return false;
- }
- }
-
return true;
}