You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ignite.apache.org by vo...@apache.org on 2016/01/11 16:27:28 UTC
[01/11] ignite git commit: Platforms now publish DEVNOTES.txt
Repository: ignite
Updated Branches:
refs/heads/ignite-2314 51cdfd448 -> 58c045a6e
Platforms now publish DEVNOTES.txt
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/f1f8cda2
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/f1f8cda2
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/f1f8cda2
Branch: refs/heads/ignite-2314
Commit: f1f8cda2f3f62231f42a59951bf34c39577c1bec
Parents: f97dc9f
Author: Anton Vinogradov <av...@apache.org>
Authored: Tue Dec 29 13:02:42 2015 +0300
Committer: Anton Vinogradov <av...@apache.org>
Committed: Tue Dec 29 13:02:42 2015 +0300
----------------------------------------------------------------------
assembly/release-fabric-base.xml | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ignite/blob/f1f8cda2/assembly/release-fabric-base.xml
----------------------------------------------------------------------
diff --git a/assembly/release-fabric-base.xml b/assembly/release-fabric-base.xml
index 5be9f8f..9f9d310 100644
--- a/assembly/release-fabric-base.xml
+++ b/assembly/release-fabric-base.xml
@@ -104,6 +104,7 @@
<outputDirectory>/platforms/cpp</outputDirectory>
<includes>
<include>README.txt</include>
+ <include>DEVNOTES.txt</include>
</includes>
</fileSet>
[03/11] ignite git commit: Revert "IGNITE-2330: Simplified GridFunc."
Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple3.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple3.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple3.java
index e5d247a..b999e2a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple3.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple3.java
@@ -34,6 +34,7 @@ import org.jetbrains.annotations.Nullable;
* This class doesn't provide any synchronization for multi-threaded access
* and it is responsibility of the user of this class to provide outside
* synchronization, if needed.
+ * @see GridFunc#t3()
* @see GridFunc#t(Object, Object, Object)
*/
public class GridTuple3<V1, V2, V3> implements Iterable<Object>, Externalizable, Cloneable {
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple4.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple4.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple4.java
index d1e69b5..c95a859 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple4.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple4.java
@@ -34,6 +34,7 @@ import org.jetbrains.annotations.Nullable;
* This class doesn't provide any synchronization for multi-threaded access
* and it is responsibility of the user of this class to provide outside
* synchronization, if needed.
+ * @see GridFunc#t4()
* @see GridFunc#t(Object, Object, Object, Object)
*/
public class GridTuple4<V1, V2, V3, V4> implements Iterable<Object>, Externalizable, Cloneable {
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple5.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple5.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple5.java
index 7d25996..9790f48 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple5.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple5.java
@@ -34,6 +34,7 @@ import org.jetbrains.annotations.Nullable;
* This class doesn't provide any synchronization for multi-threaded access
* and it is responsibility of the user of this class to provide outside
* synchronization, if needed.
+ * @see GridFunc#t5()
* @see GridFunc#t(Object, Object, Object, Object, Object)
*/
public class GridTuple5<V1, V2, V3, V4, V5> implements Iterable<Object>, Externalizable, Cloneable {
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple6.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple6.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple6.java
index c904587..044944b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple6.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTuple6.java
@@ -34,6 +34,7 @@ import org.jetbrains.annotations.Nullable;
* This class doesn't provide any synchronization for multi-threaded access
* and it is responsibility of the user of this class to provide outside
* synchronization, if needed.
+ * @see GridFunc#t6()
* @see GridFunc#t(Object, Object, Object, Object, Object)
*/
public class GridTuple6<V1, V2, V3, V4, V5, V6> implements Iterable<Object>, Externalizable,
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTupleV.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTupleV.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTupleV.java
index 58e18aa..225366a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTupleV.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridTupleV.java
@@ -35,6 +35,7 @@ import org.apache.ignite.internal.util.typedef.internal.U;
* This class doesn't provide any synchronization for multi-threaded access
* and it is responsibility of the user of this class to provide outside
* synchronization, if needed.
+ * @see GridFunc#tv(Object...)
*/
public class GridTupleV implements Iterable<Object>, Externalizable, Cloneable {
/** */
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/lang/IgniteUuid.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/lang/IgniteUuid.java b/modules/core/src/main/java/org/apache/ignite/lang/IgniteUuid.java
index 44ca067..5c6bb9a 100644
--- a/modules/core/src/main/java/org/apache/ignite/lang/IgniteUuid.java
+++ b/modules/core/src/main/java/org/apache/ignite/lang/IgniteUuid.java
@@ -186,7 +186,7 @@ public final class IgniteUuid implements Comparable<IgniteUuid>, Iterable<Ignite
/** {@inheritDoc} */
@Override public GridIterator<IgniteUuid> iterator() {
- return F.identityIteratorReadOnly(Collections.singleton(this));
+ return F.iterator(Collections.singleton(this), F.<IgniteUuid>identity(), true);
}
/** {@inheritDoc} */
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/spi/eventstorage/memory/MemoryEventStorageSpi.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/eventstorage/memory/MemoryEventStorageSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/eventstorage/memory/MemoryEventStorageSpi.java
index 56a627a..c7c635e 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/eventstorage/memory/MemoryEventStorageSpi.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/eventstorage/memory/MemoryEventStorageSpi.java
@@ -216,7 +216,6 @@ public class MemoryEventStorageSpi extends IgniteSpiAdapter implements EventStor
}
/** {@inheritDoc} */
- @SuppressWarnings("unchecked")
@Override public <T extends Event> Collection<T> localEvents(IgnitePredicate<T> p) {
A.notNull(p, "p");
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheLuceneQueryIndexTest.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheLuceneQueryIndexTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheLuceneQueryIndexTest.java
index 1e868b7..59a3eca 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheLuceneQueryIndexTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheLuceneQueryIndexTest.java
@@ -21,7 +21,6 @@ import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
-import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
@@ -336,8 +335,7 @@ public class GridCacheLuceneQueryIndexTest extends GridCommonAbstractTest {
map = new HashMap<>();
}
- map.put(new ObjectKey(String.valueOf(i)),
- vals[ThreadLocalRandom.current().nextInt(vals.length)]);
+ map.put(new ObjectKey(String.valueOf(i)), F.rand(vals));
}
if (!map.isEmpty()) {
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java
index 52737e7..cb24e54 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java
@@ -757,7 +757,7 @@ public abstract class GridCacheEventAbstractTest extends GridCacheAbstractSelfTe
if (TEST_INFO)
X.println("Cache event: " + evt.shortDisplay());
- AtomicInteger cntr = F.addIfAbsent(cntrs, evt.type(), new AtomicInteger());
+ AtomicInteger cntr = F.addIfAbsent(cntrs, evt.type(), F.newAtomicInt());
assert cntr != null;
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryCrashDetectionSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryCrashDetectionSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryCrashDetectionSelfTest.java
index 3482fd6..1aae999 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryCrashDetectionSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryCrashDetectionSelfTest.java
@@ -517,16 +517,12 @@ public class IpcSharedMemoryCrashDetectionSelfTest extends GridCommonAbstractTes
* @param shmemIds Shared memory IDs string.
*/
public void shmemIds(String shmemIds) {
- if (shmemIds == null)
- this.shmemIds = null;
- else {
- String[] tokens = shmemIds.split(",");
-
- this.shmemIds = new ArrayList<>(tokens.length);
-
- for (String token : tokens)
- this.shmemIds.add(Long.valueOf(token).intValue());
- }
+ this.shmemIds = (shmemIds == null) ? null :
+ F.transform(shmemIds.split(","), new C1<String, Integer>() {
+ @Override public Integer apply(String s) {
+ return Long.valueOf(s).intValue();
+ }
+ });
}
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/test/java/org/apache/ignite/lang/GridBasicPerformanceTest.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/lang/GridBasicPerformanceTest.java b/modules/core/src/test/java/org/apache/ignite/lang/GridBasicPerformanceTest.java
index 353367e..37e7afe 100644
--- a/modules/core/src/test/java/org/apache/ignite/lang/GridBasicPerformanceTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/lang/GridBasicPerformanceTest.java
@@ -748,14 +748,8 @@ public class GridBasicPerformanceTest {
for (int i = 0; i < MAX; i++) {
if (sort)
Arrays.binarySearch(arr, ThreadLocalRandom8.current().nextInt(lim));
- else {
- int val = ThreadLocalRandom8.current().nextInt(lim);
-
- for (long arrItem : arr) {
- if (arrItem == val)
- break;
- }
- }
+ else
+ F.contains(arr, ThreadLocalRandom8.current().nextInt(lim));
}
long time = System.currentTimeMillis() - start;
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/test/java/org/apache/ignite/lang/GridFuncPerformanceTest.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/lang/GridFuncPerformanceTest.java b/modules/core/src/test/java/org/apache/ignite/lang/GridFuncPerformanceTest.java
new file mode 100644
index 0000000..5afd75f
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/lang/GridFuncPerformanceTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.lang;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import org.apache.ignite.internal.util.lang.GridIterator;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.testframework.junits.common.GridCommonTest;
+
+/**
+ * GridFunc performance test.
+ */
+@GridCommonTest(group = "Lang")
+public class GridFuncPerformanceTest extends GridCommonAbstractTest {
+ /**
+ * Creates test.
+ */
+ public GridFuncPerformanceTest() {
+ super(/*start grid*/false);
+ }
+
+ /**
+ *
+ */
+ public void testTransformingIteratorPerformance() {
+ // Warmup.
+ testBody();
+ testBody();
+ testBody();
+
+ long r1 = testBody();
+ long r2 = testBody();
+ long r3 = testBody();
+
+ double r = (r1 + r2 + r3) / 3.f;
+
+ System.out.println("Average result is: " + Math.round(r) + "msec.");
+ }
+
+ /**
+ *
+ * @return Duration of the test.
+ */
+ @SuppressWarnings({"UnusedDeclaration"})
+ private long testBody() {
+ int MAX = 20000000;
+
+ Collection<Integer> l = new ArrayList<>(MAX);
+
+ for (int i = 0; i < MAX / 10; i++)
+ l.add(i);
+
+ IgniteClosure<Integer, Integer> c = new IgniteClosure<Integer, Integer>() {
+ @Override public Integer apply(Integer e) {
+ return e;
+ }
+ };
+
+ IgnitePredicate<Integer> p1 = new IgnitePredicate<Integer>() {
+ @Override public boolean apply(Integer e) {
+ return e % 2 == 0;
+ }
+ };
+ IgnitePredicate<Integer> p2 = new IgnitePredicate<Integer>() {
+ @Override public boolean apply(Integer e) {
+ return e % 2 != 0;
+ }
+ };
+
+ GridIterator<Integer> iter = F.iterator(l, c, true, p1, p2);
+
+ long n = 0;
+
+ long start = System.currentTimeMillis();
+
+ for (Integer i : iter)
+ n += i;
+
+ long duration = System.currentTimeMillis() - start;
+
+ System.out.println("Duration: " + duration + "msec.");
+
+ return duration;
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/test/java/org/apache/ignite/loadtest/GridLoadTestStatistics.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtest/GridLoadTestStatistics.java b/modules/core/src/test/java/org/apache/ignite/loadtest/GridLoadTestStatistics.java
index 10b140e..5d517a4 100644
--- a/modules/core/src/test/java/org/apache/ignite/loadtest/GridLoadTestStatistics.java
+++ b/modules/core/src/test/java/org/apache/ignite/loadtest/GridLoadTestStatistics.java
@@ -107,7 +107,7 @@ public class GridLoadTestStatistics {
AtomicInteger cnt;
synchronized (nodeCnts) {
- cnt = F.addIfAbsent(nodeCnts, id, new AtomicInteger());
+ cnt = F.addIfAbsent(nodeCnts, id, F.newAtomicInt());
}
assert cnt != null;
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TreeIndex.java
----------------------------------------------------------------------
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TreeIndex.java
index 9582df0..28adeee 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TreeIndex.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TreeIndex.java
@@ -24,10 +24,10 @@ import java.util.Iterator;
import java.util.NavigableMap;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
+import org.apache.ignite.internal.util.GridEmptyIterator;
import org.apache.ignite.internal.util.offheap.unsafe.GridOffHeapSnapTreeMap;
import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeGuard;
import org.apache.ignite.internal.util.snaptree.SnapTreeMap;
-import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.SB;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.spi.indexing.IndexingQueryFilter;
@@ -319,7 +319,7 @@ public class GridH2TreeIndex extends GridH2IndexBase implements Comparator<GridS
comparable(last, 1));
if (range == null)
- return F.emptyIterator();
+ return new GridEmptyIterator<>();
return filter(range.values().iterator());
}
[09/11] ignite git commit: IGNITE-2206: Hadoop file system creation
is now abstracted out using factory interface.
Posted by vo...@apache.org.
IGNITE-2206: Hadoop file system creation is now abstracted out using factory interface.
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/8ed73b4a
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/8ed73b4a
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/8ed73b4a
Branch: refs/heads/ignite-2314
Commit: 8ed73b4af8024167daeb4775e084b1f6a23fbf13
Parents: 7d58d14
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Tue Jan 5 10:59:31 2016 +0400
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Tue Jan 5 10:59:31 2016 +0400
----------------------------------------------------------------------
.../org/apache/ignite/igfs/IgfsUserContext.java | 16 +-
.../igfs/secondary/IgfsSecondaryFileSystem.java | 14 -
.../processors/hadoop/HadoopPayloadAware.java | 28 ++
.../ignite/internal/processors/igfs/IgfsEx.java | 13 -
.../internal/processors/igfs/IgfsImpl.java | 16 +-
.../internal/processors/igfs/IgfsPaths.java | 62 +++-
.../igfs/IgfsSecondaryFileSystemImpl.java | 11 -
.../visor/node/VisorIgfsConfiguration.java | 43 ---
.../processors/igfs/IgfsAbstractSelfTest.java | 8 +-
.../igfs/IgfsExUniversalFileSystemAdapter.java | 11 +-
.../igfs/UniversalFileSystemAdapter.java | 5 +-
.../hadoop/fs/BasicHadoopFileSystemFactory.java | 209 ++++++++++++
.../fs/CachingHadoopFileSystemFactory.java | 86 +++++
.../hadoop/fs/HadoopFileSystemFactory.java | 52 +++
.../fs/IgniteHadoopIgfsSecondaryFileSystem.java | 264 +++++++--------
.../hadoop/fs/v1/IgniteHadoopFileSystem.java | 144 +++++---
.../hadoop/fs/v2/IgniteHadoopFileSystem.java | 115 ++++---
.../hadoop/SecondaryFileSystemProvider.java | 139 --------
.../hadoop/fs/HadoopFileSystemCacheUtils.java | 8 +-
.../hadoop/fs/HadoopLazyConcurrentMap.java | 5 +-
.../ignite/igfs/Hadoop1DualAbstractTest.java | 14 +-
.../igfs/HadoopFIleSystemFactorySelfTest.java | 326 +++++++++++++++++++
...oopFileSystemUniversalFileSystemAdapter.java | 53 +--
...oopSecondaryFileSystemConfigurationTest.java | 27 +-
.../IgniteHadoopFileSystemAbstractSelfTest.java | 71 ++--
.../testsuites/IgniteHadoopTestSuite.java | 3 +
26 files changed, 1191 insertions(+), 552 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java
index 8db4e23..1e1cd31 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java
@@ -34,24 +34,24 @@ public abstract class IgfsUserContext {
* The main contract of this method is that {@link #currentUser()} method invoked
* inside closure always returns 'user' this callable executed with.
* @param user the user name to invoke closure on behalf of.
- * @param clo the closure to execute
+ * @param c the closure to execute
* @param <T> The type of closure result.
* @return the result of closure execution.
* @throws IllegalArgumentException if user name is null or empty String or if the closure is null.
*/
- public static <T> T doAs(String user, final IgniteOutClosure<T> clo) {
+ public static <T> T doAs(String user, final IgniteOutClosure<T> c) {
if (F.isEmpty(user))
throw new IllegalArgumentException("Failed to use null or empty user name.");
final String ctxUser = userStackThreadLocal.get();
if (F.eq(ctxUser, user))
- return clo.apply(); // correct context is already there
+ return c.apply(); // correct context is already there
userStackThreadLocal.set(user);
try {
- return clo.apply();
+ return c.apply();
}
finally {
userStackThreadLocal.set(ctxUser);
@@ -81,24 +81,24 @@ public abstract class IgfsUserContext {
* }
* </pre>
* @param user the user name to invoke closure on behalf of.
- * @param clbl the Callable to execute
+ * @param c the Callable to execute
* @param <T> The type of callable result.
* @return the result of closure execution.
* @throws IllegalArgumentException if user name is null or empty String or if the closure is null.
*/
- public static <T> T doAs(String user, final Callable<T> clbl) throws Exception {
+ public static <T> T doAs(String user, final Callable<T> c) throws Exception {
if (F.isEmpty(user))
throw new IllegalArgumentException("Failed to use null or empty user name.");
final String ctxUser = userStackThreadLocal.get();
if (F.eq(ctxUser, user))
- return clbl.call(); // correct context is already there
+ return c.call(); // correct context is already there
userStackThreadLocal.set(user);
try {
- return clbl.call();
+ return c.call();
}
finally {
userStackThreadLocal.set(ctxUser);
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
index ca6ecb7..3f124eb 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
@@ -192,18 +192,4 @@ public interface IgfsSecondaryFileSystem {
* @throws IgniteException In case of error.
*/
public long usedSpaceSize() throws IgniteException;
-
- /**
- * Gets the implementation specific properties of file system.
- *
- * @return Map of properties.
- */
- public Map<String,String> properties();
-
-
- /**
- * Closes the secondary file system.
- * @throws IgniteException in case of an error.
- */
- public void close() throws IgniteException;
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopPayloadAware.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopPayloadAware.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopPayloadAware.java
new file mode 100644
index 0000000..9b79729
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopPayloadAware.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+/**
+ * Gets payload for Hadoop secondary file system.
+ */
+public interface HadoopPayloadAware {
+ /**
+ * @return Payload.
+ */
+ public Object getPayload();
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java
index 8ff7247..cf268e0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java
@@ -43,19 +43,6 @@ public interface IgfsEx extends IgniteFileSystem {
/** File property: prefer writes to local node. */
public static final String PROP_PREFER_LOCAL_WRITES = "locWrite";
- /** Property name for path to Hadoop configuration. */
- public static final String SECONDARY_FS_CONFIG_PATH = "SECONDARY_FS_CONFIG_PATH";
-
- /** Property name for URI of file system. */
- public static final String SECONDARY_FS_URI = "SECONDARY_FS_URI";
-
- /** Property name for default user name of file system.
- * NOTE: for secondary file system this is just a default user name, which is used
- * when the 2ndary filesystem is used outside of any user context.
- * If another user name is set in the context, 2ndary file system will work on behalf
- * of that user, which is different from the default. */
- public static final String SECONDARY_FS_USER_NAME = "SECONDARY_FS_USER_NAME";
-
/**
* Stops IGFS cleaning all used resources.
*
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java
index 680e660..38914ea 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java
@@ -72,6 +72,7 @@ import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.managers.communication.GridMessageListener;
import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager;
import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
+import org.apache.ignite.internal.processors.hadoop.HadoopPayloadAware;
import org.apache.ignite.internal.processors.task.GridInternal;
import org.apache.ignite.internal.util.GridSpinBusyLock;
import org.apache.ignite.internal.util.future.GridCompoundFuture;
@@ -87,6 +88,7 @@ import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteBiTuple;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.lifecycle.LifecycleAware;
import org.apache.ignite.resources.IgniteInstanceResource;
import org.apache.ignite.thread.IgniteThreadPoolExecutor;
import org.jetbrains.annotations.Nullable;
@@ -200,6 +202,9 @@ public final class IgfsImpl implements IgfsEx {
data = igfsCtx.data();
secondaryFs = cfg.getSecondaryFileSystem();
+ if (secondaryFs instanceof LifecycleAware)
+ ((LifecycleAware) secondaryFs).start();
+
/* Default IGFS mode. */
IgfsMode dfltMode;
@@ -256,8 +261,12 @@ public final class IgfsImpl implements IgfsEx {
modeRslvr = new IgfsModeResolver(dfltMode, modes);
- secondaryPaths = new IgfsPaths(secondaryFs == null ? null : secondaryFs.properties(), dfltMode,
- modeRslvr.modesOrdered());
+ Object secondaryFsPayload = null;
+
+ if (secondaryFs instanceof HadoopPayloadAware)
+ secondaryFsPayload = ((HadoopPayloadAware) secondaryFs).getPayload();
+
+ secondaryPaths = new IgfsPaths(secondaryFsPayload, dfltMode, modeRslvr.modesOrdered());
// Check whether IGFS LRU eviction policy is set on data cache.
String dataCacheName = igfsCtx.configuration().getDataCacheName();
@@ -305,7 +314,8 @@ public final class IgfsImpl implements IgfsEx {
batch.cancel();
try {
- secondaryFs.close();
+ if (secondaryFs instanceof LifecycleAware)
+ ((LifecycleAware)secondaryFs).stop();
}
catch (Exception e) {
log.error("Failed to close secondary file system.", e);
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsPaths.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsPaths.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsPaths.java
index fbf89ce..4a79259 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsPaths.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsPaths.java
@@ -17,17 +17,21 @@
package org.apache.ignite.internal.processors.igfs;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.List;
-import java.util.Map;
+
+import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.igfs.IgfsMode;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.internal.util.typedef.T2;
import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.marshaller.jdk.JdkMarshaller;
import org.jetbrains.annotations.Nullable;
/**
@@ -37,8 +41,8 @@ public class IgfsPaths implements Externalizable {
/** */
private static final long serialVersionUID = 0L;
- /** Additional secondary file system properties. */
- private Map<String, String> props;
+ /** */
+ private byte[] payloadBytes;
/** Default IGFS mode. */
private IgfsMode dfltMode;
@@ -56,22 +60,25 @@ public class IgfsPaths implements Externalizable {
/**
* Constructor.
*
- * @param props Additional secondary file system properties.
+ * @param payload Payload.
* @param dfltMode Default IGFS mode.
* @param pathModes Path modes.
+ * @throws IgniteCheckedException If failed.
*/
- public IgfsPaths(Map<String, String> props, IgfsMode dfltMode, @Nullable List<T2<IgfsPath,
- IgfsMode>> pathModes) {
- this.props = props;
+ public IgfsPaths(Object payload, IgfsMode dfltMode, @Nullable List<T2<IgfsPath, IgfsMode>> pathModes)
+ throws IgniteCheckedException {
this.dfltMode = dfltMode;
this.pathModes = pathModes;
- }
- /**
- * @return Secondary file system properties.
- */
- public Map<String, String> properties() {
- return props;
+ if (payload == null)
+ payloadBytes = null;
+ else {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+
+ new JdkMarshaller().marshal(payload, out);
+
+ payloadBytes = out.toByteArray();
+ }
}
/**
@@ -88,9 +95,25 @@ public class IgfsPaths implements Externalizable {
return pathModes;
}
    /**
     * Deserializes and returns the secondary file system payload, if any was supplied to the constructor.
     *
     * @param clsLdr Class loader used to resolve payload classes during unmarshalling.
     * @return Payload object, or {@code null} if no payload was set.
     * @throws IgniteCheckedException If failed to deserialize the payload.
     */
    @Nullable public Object getPayload(ClassLoader clsLdr) throws IgniteCheckedException {
        if (payloadBytes == null)
            return null;
        else {
            ByteArrayInputStream in = new ByteArrayInputStream(payloadBytes);

            // Symmetric with the JdkMarshaller.marshal() call in the constructor.
            return new JdkMarshaller().unmarshal(in, clsLdr);
        }
    }
+
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
- U.writeStringMap(out, props);
+ U.writeByteArray(out, payloadBytes);
+
U.writeEnum(out, dfltMode);
if (pathModes != null) {
@@ -98,7 +121,10 @@ public class IgfsPaths implements Externalizable {
out.writeInt(pathModes.size());
for (T2<IgfsPath, IgfsMode> pathMode : pathModes) {
+ assert pathMode.getKey() != null;
+
pathMode.getKey().writeExternal(out);
+
U.writeEnum(out, pathMode.getValue());
}
}
@@ -108,7 +134,8 @@ public class IgfsPaths implements Externalizable {
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- props = U.readStringMap(in);
+ payloadBytes = U.readByteArray(in);
+
dfltMode = IgfsMode.fromOrdinal(in.readByte());
if (in.readBoolean()) {
@@ -118,11 +145,10 @@ public class IgfsPaths implements Externalizable {
for (int i = 0; i < size; i++) {
IgfsPath path = new IgfsPath();
- path.readExternal(in);
- T2<IgfsPath, IgfsMode> entry = new T2<>(path, IgfsMode.fromOrdinal(in.readByte()));
+ path.readExternal(in);
- pathModes.add(entry);
+ pathModes.add(new T2<>(path, IgfsMode.fromOrdinal(in.readByte())));
}
}
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java
index 23d6322..44e858f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java
@@ -19,7 +19,6 @@ package org.apache.ignite.internal.processors.igfs;
import java.io.OutputStream;
import java.util.Collection;
-import java.util.Collections;
import java.util.Map;
import org.apache.ignite.IgniteException;
import org.apache.ignite.igfs.IgfsFile;
@@ -116,14 +115,4 @@ class IgfsSecondaryFileSystemImpl implements IgfsSecondaryFileSystem {
@Override public long usedSpaceSize() throws IgniteException {
return igfs.usedSpaceSize();
}
-
- /** {@inheritDoc} */
- @Override public Map<String, String> properties() {
- return Collections.emptyMap();
- }
-
- /** {@inheritDoc} */
- @Override public void close() throws IgniteException {
- // No-op.
- }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java
index e85484d..ea0e721 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java
@@ -29,9 +29,6 @@ import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.jetbrains.annotations.Nullable;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_CONFIG_PATH;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_URI;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_USER_NAME;
import static org.apache.ignite.internal.visor.util.VisorTaskUtils.compactClass;
/**
@@ -65,15 +62,6 @@ public class VisorIgfsConfiguration implements Serializable {
/** Number of batches that can be concurrently sent to remote node. */
private int perNodeParallelBatchCnt;
- /** URI of the secondary Hadoop file system. */
- private String secondaryHadoopFileSysUri;
-
- /** Path for the secondary hadoop file system config. */
- private String secondaryHadoopFileSysCfgPath;
-
- /** User name for the secondary hadoop file system config. */
- private String secondaryHadoopFileSysUserName;
-
/** IGFS instance mode. */
private IgfsMode dfltMode;
@@ -141,16 +129,6 @@ public class VisorIgfsConfiguration implements Serializable {
cfg.perNodeBatchSize = igfs.getPerNodeBatchSize();
cfg.perNodeParallelBatchCnt = igfs.getPerNodeParallelBatchCount();
- IgfsSecondaryFileSystem secFs = igfs.getSecondaryFileSystem();
-
- if (secFs != null) {
- Map<String, String> props = secFs.properties();
-
- cfg.secondaryHadoopFileSysUri = props.get(SECONDARY_FS_URI);
- cfg.secondaryHadoopFileSysCfgPath = props.get(SECONDARY_FS_CONFIG_PATH);
- cfg.secondaryHadoopFileSysUserName = props.get(SECONDARY_FS_USER_NAME);
- }
-
cfg.dfltMode = igfs.getDefaultMode();
cfg.pathModes = igfs.getPathModes();
cfg.dualModePutExecutorSrvc = compactClass(igfs.getDualModePutExecutorService());
@@ -251,27 +229,6 @@ public class VisorIgfsConfiguration implements Serializable {
}
/**
- * @return URI of the secondary Hadoop file system.
- */
- @Nullable public String secondaryHadoopFileSystemUri() {
- return secondaryHadoopFileSysUri;
- }
-
- /**
- * @return User name of the secondary Hadoop file system.
- */
- @Nullable public String secondaryHadoopFileSystemUserName() {
- return secondaryHadoopFileSysUserName;
- }
-
- /**
- * @return Path for the secondary hadoop file system config.
- */
- @Nullable public String secondaryHadoopFileSystemConfigPath() {
- return secondaryHadoopFileSysCfgPath;
- }
-
- /**
* @return IGFS instance mode.
*/
public IgfsMode defaultMode() {
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java
index b290303..015b992 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java
@@ -2744,7 +2744,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest {
finally {
U.closeQuiet(os);
- IgfsEx igfsEx = uni.getAdapter(IgfsEx.class);
+ IgfsEx igfsEx = uni.unwrap(IgfsEx.class);
if (igfsEx != null)
awaitFileClose(igfsEx.asSecondary(), file);
@@ -2868,7 +2868,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest {
* @throws IgniteCheckedException If failed.
*/
protected void checkExist(UniversalFileSystemAdapter uni, IgfsPath... paths) throws IgniteCheckedException {
- IgfsEx ex = uni.getAdapter(IgfsEx.class);
+ IgfsEx ex = uni.unwrap(IgfsEx.class);
for (IgfsPath path : paths) {
if (ex != null)
assert ex.context().meta().fileId(path) != null : "Path doesn't exist [igfs=" + ex.name() +
@@ -2921,7 +2921,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest {
* @throws Exception If failed.
*/
protected void checkNotExist(UniversalFileSystemAdapter uni, IgfsPath... paths) throws Exception {
- IgfsEx ex = uni.getAdapter(IgfsEx.class);
+ IgfsEx ex = uni.unwrap(IgfsEx.class);
for (IgfsPath path : paths) {
if (ex != null)
assert ex.context().meta().fileId(path) == null : "Path exists [igfs=" + ex.name() + ", path=" +
@@ -3222,7 +3222,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest {
*/
@SuppressWarnings("unchecked")
public static void clear(UniversalFileSystemAdapter uni) throws Exception {
- IgfsEx igfsEx = uni.getAdapter(IgfsEx.class);
+ IgfsEx igfsEx = uni.unwrap(IgfsEx.class);
if (igfsEx != null)
clear(igfsEx);
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsExUniversalFileSystemAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsExUniversalFileSystemAdapter.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsExUniversalFileSystemAdapter.java
index 7583364..c6bef72 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsExUniversalFileSystemAdapter.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsExUniversalFileSystemAdapter.java
@@ -28,7 +28,6 @@ import org.apache.ignite.igfs.IgfsPath;
* Universal adapter over {@link IgfsEx} filesystem.
*/
public class IgfsExUniversalFileSystemAdapter implements UniversalFileSystemAdapter {
-
/** The wrapped igfs. */
private final IgfsEx igfsEx;
@@ -69,18 +68,14 @@ public class IgfsExUniversalFileSystemAdapter implements UniversalFileSystemAdap
@Override public boolean delete(String path, boolean recursive) throws IOException {
IgfsPath igfsPath = new IgfsPath(path);
- boolean del = igfsEx.delete(igfsPath, recursive);
-
- return del;
+ return igfsEx.delete(igfsPath, recursive);
}
/** {@inheritDoc} */
@Override public InputStream openInputStream(String path) throws IOException {
IgfsPath igfsPath = new IgfsPath(path);
- IgfsInputStreamAdapter adapter = igfsEx.open(igfsPath);
-
- return adapter;
+ return igfsEx.open(igfsPath);
}
/** {@inheritDoc} */
@@ -97,7 +92,7 @@ public class IgfsExUniversalFileSystemAdapter implements UniversalFileSystemAdap
}
/** {@inheritDoc} */
- @Override public <T> T getAdapter(Class<T> clazz) {
+ @Override public <T> T unwrap(Class<T> clazz) {
if (clazz == IgfsEx.class)
return (T)igfsEx;
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/UniversalFileSystemAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/UniversalFileSystemAdapter.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/UniversalFileSystemAdapter.java
index ba8c164..eef0057 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/UniversalFileSystemAdapter.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/UniversalFileSystemAdapter.java
@@ -31,8 +31,9 @@ public interface UniversalFileSystemAdapter {
/**
* Gets name of the FS.
* @return name of this file system.
+ * @throws IOException in case of failure.
*/
- String name();
+ String name() throws IOException;
/**
* Answers if a file denoted by path exists.
@@ -93,5 +94,5 @@ public interface UniversalFileSystemAdapter {
* @param <T> The type we need to adapt to.
* @return the adapter object of the given type.
*/
- <T> T getAdapter(Class<T> clazz);
+ <T> T unwrap(Class<T> clazz);
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
new file mode 100644
index 0000000..1e2bbf2
--- /dev/null
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleAware;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.Arrays;
+
+/**
+ * Simple Hadoop file system factory which delegates to {@code FileSystem.get()} on each call.
+ * <p>
+ * If {@code "fs.[prefix].impl.disable.cache"} is set to {@code true}, file system instances will be cached by Hadoop.
+ */
+public class BasicHadoopFileSystemFactory implements HadoopFileSystemFactory, Externalizable, LifecycleAware {
+ /** */
+ private static final long serialVersionUID = 0L;
+
+ /** File system URI. */
+ protected String uri;
+
+ /** File system config paths. */
+ protected String[] cfgPaths;
+
+ /** Configuration of the secondary filesystem, never null. */
+ protected transient Configuration cfg;
+
+ /** Resulting URI. */
+ protected transient URI fullUri;
+
+ /**
+ * Constructor.
+ */
+ public BasicHadoopFileSystemFactory() {
+ // No-op.
+ }
+
+ /** {@inheritDoc} */
+ @Override public FileSystem get(String usrName) throws IOException {
+ return create0(IgfsUtils.fixUserName(usrName));
+ }
+
+ /**
+ * Internal file system create routine.
+ *
+ * @param usrName User name.
+ * @return File system.
+ * @throws IOException If failed.
+ */
+ protected FileSystem create0(String usrName) throws IOException {
+ assert cfg != null;
+
+ try {
+ return FileSystem.get(fullUri, cfg, usrName);
+ }
+ catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+
+ throw new IOException("Failed to create file system due to interrupt.", e);
+ }
+ }
+
+ /**
+ * Gets file system URI.
+ * <p>
+ * This URI will be used as a first argument when calling {@link FileSystem#get(URI, Configuration, String)}.
+ * <p>
+ * If not set, default URI will be picked from file system configuration using
+ * {@link FileSystem#getDefaultUri(Configuration)} method.
+ *
+ * @return File system URI.
+ */
+ @Nullable public String getUri() {
+ return uri;
+ }
+
+ /**
+ * Sets file system URI. See {@link #getUri()} for more information.
+ *
+ * @param uri File system URI.
+ */
+ public void setUri(@Nullable String uri) {
+ this.uri = uri;
+ }
+
+ /**
+ * Gets paths to additional file system configuration files (e.g. core-site.xml).
+ * <p>
+ * Path could be either absolute or relative to {@code IGNITE_HOME} environment variable.
+ * <p>
+ * All provided paths will be loaded in the order they provided and then applied to {@link Configuration}. It means
+ * that path order might be important in some cases.
+ * <p>
+ * <b>NOTE!</b> Factory can be serialized and transferred to other machines where instance of
+ * {@link IgniteHadoopFileSystem} resides. Corresponding paths must exist on these machines as well.
+ *
+ * @return Paths to file system configuration files.
+ */
+ @Nullable public String[] getConfigPaths() {
+ return cfgPaths;
+ }
+
+ /**
+ * Set paths to additional file system configuration files (e.g. core-site.xml). See {@link #getConfigPaths()} for
+ * more information.
+ *
+ * @param cfgPaths Paths to file system configuration files.
+ */
+ public void setConfigPaths(String... cfgPaths) {
+ this.cfgPaths = cfgPaths;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void start() throws IgniteException {
+ cfg = HadoopUtils.safeCreateConfiguration();
+
+ if (cfgPaths != null) {
+ for (String cfgPath : cfgPaths) {
+ if (cfgPath == null)
+ throw new NullPointerException("Configuration path cannot be null: " + Arrays.toString(cfgPaths));
+ else {
+ URL url = U.resolveIgniteUrl(cfgPath);
+
+ if (url == null) {
+ // If secConfPath is given, it should be resolvable:
+ throw new IgniteException("Failed to resolve secondary file system configuration path " +
+ "(ensure that it exists locally and you have read access to it): " + cfgPath);
+ }
+
+ cfg.addResource(url);
+ }
+ }
+ }
+
+ // If secondary fs URI is not given explicitly, try to get it from the configuration:
+ if (uri == null)
+ fullUri = FileSystem.getDefaultUri(cfg);
+ else {
+ try {
+ fullUri = new URI(uri);
+ }
+ catch (URISyntaxException use) {
+ throw new IgniteException("Failed to resolve secondary file system URI: " + uri);
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override public void stop() throws IgniteException {
+ // No-op.
+ }
+
+ /** {@inheritDoc} */
+ @Override public void writeExternal(ObjectOutput out) throws IOException {
+ U.writeString(out, uri);
+
+ if (cfgPaths != null) {
+ out.writeInt(cfgPaths.length);
+
+ for (String cfgPath : cfgPaths)
+ U.writeString(out, cfgPath);
+ }
+ else
+ out.writeInt(-1);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ uri = U.readString(in);
+
+ int cfgPathsCnt = in.readInt();
+
+ if (cfgPathsCnt != -1) {
+ cfgPaths = new String[cfgPathsCnt];
+
+ for (int i = 0; i < cfgPathsCnt; i++)
+ cfgPaths[i] = U.readString(in);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
new file mode 100644
index 0000000..91f7777
--- /dev/null
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Caching Hadoop file system factory. Caches {@link FileSystem} instances on per-user basis. Doesn't rely on
+ * built-in Hadoop {@code FileSystem} caching mechanics. Separate {@code FileSystem} instance is created for each
+ * user instead.
+ * <p>
+ * This makes cache instance resistant to concurrent calls to {@link FileSystem#close()} in other parts of the user
+ * code. On the other hand, this might cause problems on some environments. E.g. if Kerberos is enabled, a call to
+ * {@link FileSystem#get(URI, Configuration, String)} will refresh Kerberos token. But this factory implementation
+ * calls this method only once per user what may lead to token expiration. In such cases it makes sense to either
+ * use {@link BasicHadoopFileSystemFactory} or implement your own factory.
+ */
public class CachingHadoopFileSystemFactory extends BasicHadoopFileSystemFactory {
    /** */
    private static final long serialVersionUID = 0L;

    /**
     * Per-user file system cache. Transient: rebuilt by the field initializer when the factory is
     * deserialized on a remote node.
     */
    private final transient HadoopLazyConcurrentMap<String, FileSystem> cache = new HadoopLazyConcurrentMap<>(
        new HadoopLazyConcurrentMap.ValueFactory<String, FileSystem>() {
            @Override public FileSystem createValue(String key) throws IOException {
                // Delegate to the parent's creation routine; key is the (already fixed) user name.
                return create0(key);
            }
        }
    );

    /**
     * Public non-arg constructor.
     */
    public CachingHadoopFileSystemFactory() {
        // No-op.
    }

    /** {@inheritDoc} */
    @Override public FileSystem get(String usrName) throws IOException {
        // One FileSystem instance per user; created lazily on first request.
        return cache.getOrCreate(IgfsUtils.fixUserName(usrName));
    }

    /** {@inheritDoc} */
    @Override public void start() throws IgniteException {
        super.start();

        // Disable caching.
        // NOTE(review): assumes super.start() always leaves fullUri non-null — holds for
        // BasicHadoopFileSystemFactory, which either parses the URI or takes the configuration default.
        cfg.setBoolean(HadoopFileSystemsUtils.disableFsCachePropertyName(fullUri.getScheme()), true);
    }

    /** {@inheritDoc} */
    @Override public void stop() throws IgniteException {
        super.stop();

        // Close all cached file systems; wrap the checked exception per LifecycleAware contract.
        try {
            cache.close();
        }
        catch (IgniteCheckedException ice) {
            throw new IgniteException(ice);
        }
    }
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
new file mode 100644
index 0000000..5ad08ab
--- /dev/null
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.igfs.IgfsMode;
+import org.apache.ignite.lifecycle.LifecycleAware;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Factory for Hadoop {@link FileSystem} used by {@link IgniteHadoopIgfsSecondaryFileSystem}.
+ * <p>
+ * {@link #get(String)} method will be used whenever a call to a target {@code FileSystem} is required.
+ * <p>
+ * It is implementation dependent whether to rely on built-in Hadoop file system cache, implement own caching facility
+ * or doesn't cache file systems at all.
+ * <p>
+ * Concrete factory may implement {@link LifecycleAware} interface. In this case start and stop callbacks will be
+ * performed by Ignite. You may want to implement some initialization or cleanup there.
+ * <p>
+ * Note that factory extends {@link Serializable} interface as it might be necessary to transfer factories over the
+ * wire to {@link IgniteHadoopFileSystem} if {@link IgfsMode#PROXY} is enabled for some file
+ * system paths.
+ */
public interface HadoopFileSystemFactory extends Serializable {
    /**
     * Gets file system for the given user name.
     *
     * @param usrName User name.
     * @return File system.
     * @throws IOException In case of error.
     */
    public FileSystem get(String usrName) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
index 1ca6938..9f544c1 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
@@ -17,15 +17,7 @@
package org.apache.ignite.hadoop.fs;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.ParentNotDirectoryException;
@@ -35,6 +27,7 @@ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsDirectoryNotEmptyException;
import org.apache.ignite.igfs.IgfsException;
import org.apache.ignite.igfs.IgfsFile;
@@ -45,71 +38,59 @@ import org.apache.ignite.igfs.IgfsPathNotFoundException;
import org.apache.ignite.igfs.IgfsUserContext;
import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;
-import org.apache.ignite.internal.processors.hadoop.SecondaryFileSystemProvider;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap.ValueFactory;
+import org.apache.ignite.internal.processors.hadoop.HadoopPayloadAware;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProperties;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsSecondaryFileSystemPositionedReadable;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
import org.apache.ignite.internal.processors.igfs.IgfsFileImpl;
import org.apache.ignite.internal.processors.igfs.IgfsFileInfo;
import org.apache.ignite.internal.processors.igfs.IgfsUtils;
import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.lang.IgniteOutClosure;
+import org.apache.ignite.lifecycle.LifecycleAware;
import org.jetbrains.annotations.Nullable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.Callable;
+
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_GROUP_NAME;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_PERMISSION;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_USER_NAME;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_CONFIG_PATH;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_URI;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_USER_NAME;
/**
- * Adapter to use any Hadoop file system {@link FileSystem} as {@link IgfsSecondaryFileSystem}.
- * In fact, this class deals with different FileSystems depending on the user context,
- * see {@link IgfsUserContext#currentUser()}.
+ * Secondary file system which delegates calls to an instance of Hadoop {@link FileSystem}.
+ * <p>
+ * Target {@code FileSystem}'s are created on per-user basis using passed {@link HadoopFileSystemFactory}.
*/
-public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSystem {
- /** Properties of file system, see {@link #properties()}
- *
- * See {@link IgfsEx#SECONDARY_FS_CONFIG_PATH}
- * See {@link IgfsEx#SECONDARY_FS_URI}
- * See {@link IgfsEx#SECONDARY_FS_USER_NAME}
- * */
- private final Map<String, String> props = new HashMap<>();
-
- /** Secondary file system provider. */
- private final SecondaryFileSystemProvider secProvider;
-
+public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSystem, LifecycleAware,
+ HadoopPayloadAware {
/** The default user name. It is used if no user context is set. */
- private final String dfltUserName;
+ private String dfltUsrName;
- /** FileSystem instance created for the default user.
- * Stored outside the fileSysLazyMap due to performance reasons. */
- private final FileSystem dfltFs;
+ /** Factory. */
+ private HadoopFileSystemFactory fsFactory;
- /** Lazy per-user cache for the file systems. It is cleared and nulled in #close() method. */
- private final HadoopLazyConcurrentMap<String, FileSystem> fileSysLazyMap = new HadoopLazyConcurrentMap<>(
- new ValueFactory<String, FileSystem>() {
- @Override public FileSystem createValue(String key) {
- try {
- assert !F.isEmpty(key);
-
- return secProvider.createFileSystem(key);
- }
- catch (IOException ioe) {
- throw new IgniteException(ioe);
- }
- }
- }
- );
+ /**
+ * Default constructor for Spring.
+ */
+ public IgniteHadoopIgfsSecondaryFileSystem() {
+ // No-op.
+ }
/**
* Simple constructor that is to be used by default.
*
* @param uri URI of file system.
* @throws IgniteCheckedException In case of error.
+ * @deprecated Use {@link #getFileSystemFactory()} instead.
*/
+ @Deprecated
public IgniteHadoopIgfsSecondaryFileSystem(String uri) throws IgniteCheckedException {
this(uri, null, null);
}
@@ -120,7 +101,9 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
* @param uri URI of file system.
* @param cfgPath Additional path to Hadoop configuration.
* @throws IgniteCheckedException In case of error.
+ * @deprecated Use {@link #getFileSystemFactory()} instead.
*/
+ @Deprecated
public IgniteHadoopIgfsSecondaryFileSystem(@Nullable String uri, @Nullable String cfgPath)
throws IgniteCheckedException {
this(uri, cfgPath, null);
@@ -131,46 +114,73 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
*
* @param uri URI of file system.
* @param cfgPath Additional path to Hadoop configuration.
- * @param userName User name.
+ * @param usrName User name.
* @throws IgniteCheckedException In case of error.
+ * @deprecated Use {@link #getFileSystemFactory()} instead.
*/
+ @Deprecated
public IgniteHadoopIgfsSecondaryFileSystem(@Nullable String uri, @Nullable String cfgPath,
- @Nullable String userName) throws IgniteCheckedException {
- // Treat empty uri and userName arguments as nulls to improve configuration usability:
- if (F.isEmpty(uri))
- uri = null;
-
- if (F.isEmpty(cfgPath))
- cfgPath = null;
-
- if (F.isEmpty(userName))
- userName = null;
+ @Nullable String usrName) throws IgniteCheckedException {
+ setDefaultUserName(usrName);
- this.dfltUserName = IgfsUtils.fixUserName(userName);
+ CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
- try {
- this.secProvider = new SecondaryFileSystemProvider(uri, cfgPath);
+ fac.setUri(uri);
- // File system creation for the default user name.
- // The value is *not* stored in the 'fileSysLazyMap' cache, but saved in field:
- this.dfltFs = secProvider.createFileSystem(dfltUserName);
- }
- catch (IOException e) {
- throw new IgniteCheckedException(e);
- }
+ if (cfgPath != null)
+ fac.setConfigPaths(cfgPath);
- assert dfltFs != null;
+ setFileSystemFactory(fac);
+ }
- uri = secProvider.uri().toString();
+ /**
+ * Gets default user name.
+ * <p>
+ * Defines user name which will be used during file system invocation in case no user name is defined explicitly
+ * through {@link FileSystem#get(URI, Configuration, String)}.
+ * <p>
+ * Also this name will be used if you manipulate {@link IgniteFileSystem} directly and do not set user name
+ * explicitly using {@link IgfsUserContext#doAs(String, IgniteOutClosure)} or
+ * {@link IgfsUserContext#doAs(String, Callable)} methods.
+ * <p>
+ * If not set value of system property {@code "user.name"} will be used. If this property is not set either,
+ * {@code "anonymous"} will be used.
+ *
+ * @return Default user name.
+ */
+ @Nullable public String getDefaultUserName() {
+ return dfltUsrName;
+ }
- if (!uri.endsWith("/"))
- uri += "/";
+ /**
+ * Sets default user name. See {@link #getDefaultUserName()} for details.
+ *
+ * @param dfltUsrName Default user name.
+ */
+ public void setDefaultUserName(@Nullable String dfltUsrName) {
+ this.dfltUsrName = dfltUsrName;
+ }
- if (cfgPath != null)
- props.put(SECONDARY_FS_CONFIG_PATH, cfgPath);
+ /**
+ * Gets secondary file system factory.
+ * <p>
+ * This factory will be used whenever a call to a target {@link FileSystem} is required.
+ * <p>
+ * If not set, {@link CachingHadoopFileSystemFactory} will be used.
+ *
+ * @return Secondary file system factory.
+ */
+ public HadoopFileSystemFactory getFileSystemFactory() {
+ return fsFactory;
+ }
- props.put(SECONDARY_FS_URI, uri);
- props.put(SECONDARY_FS_USER_NAME, dfltUserName);
+ /**
+ * Sets secondary file system factory. See {@link #getFileSystemFactory()} for details.
+ *
+ * @param factory Secondary file system factory.
+ */
+ public void setFileSystemFactory(HadoopFileSystemFactory factory) {
+ this.fsFactory = factory;
}
/**
@@ -180,7 +190,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
* @return Hadoop path.
*/
private Path convert(IgfsPath path) {
- URI uri = fileSysForUser().getUri();
+ URI uri = fileSystemForUser().getUri();
return new Path(uri.getScheme(), uri.getAuthority(), path.toString());
}
@@ -234,7 +244,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public boolean exists(IgfsPath path) {
try {
- return fileSysForUser().exists(convert(path));
+ return fileSystemForUser().exists(convert(path));
}
catch (IOException e) {
throw handleSecondaryFsError(e, "Failed to check file existence [path=" + path + "]");
@@ -245,7 +255,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
@Nullable @Override public IgfsFile update(IgfsPath path, Map<String, String> props) {
HadoopIgfsProperties props0 = new HadoopIgfsProperties(props);
- final FileSystem fileSys = fileSysForUser();
+ final FileSystem fileSys = fileSystemForUser();
try {
if (props0.userName() != null || props0.groupName() != null)
@@ -266,7 +276,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
@Override public void rename(IgfsPath src, IgfsPath dest) {
// Delegate to the secondary file system.
try {
- if (!fileSysForUser().rename(convert(src), convert(dest)))
+ if (!fileSystemForUser().rename(convert(src), convert(dest)))
throw new IgfsException("Failed to rename (secondary file system returned false) " +
"[src=" + src + ", dest=" + dest + ']');
}
@@ -278,7 +288,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public boolean delete(IgfsPath path, boolean recursive) {
try {
- return fileSysForUser().delete(convert(path), recursive);
+ return fileSystemForUser().delete(convert(path), recursive);
}
catch (IOException e) {
throw handleSecondaryFsError(e, "Failed to delete file [path=" + path + ", recursive=" + recursive + "]");
@@ -288,7 +298,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public void mkdirs(IgfsPath path) {
try {
- if (!fileSysForUser().mkdirs(convert(path)))
+ if (!fileSystemForUser().mkdirs(convert(path)))
throw new IgniteException("Failed to make directories [path=" + path + "]");
}
catch (IOException e) {
@@ -299,7 +309,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) {
try {
- if (!fileSysForUser().mkdirs(convert(path), new HadoopIgfsProperties(props).permission()))
+ if (!fileSystemForUser().mkdirs(convert(path), new HadoopIgfsProperties(props).permission()))
throw new IgniteException("Failed to make directories [path=" + path + ", props=" + props + "]");
}
catch (IOException e) {
@@ -310,7 +320,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public Collection<IgfsPath> listPaths(IgfsPath path) {
try {
- FileStatus[] statuses = fileSysForUser().listStatus(convert(path));
+ FileStatus[] statuses = fileSystemForUser().listStatus(convert(path));
if (statuses == null)
throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
@@ -333,7 +343,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public Collection<IgfsFile> listFiles(IgfsPath path) {
try {
- FileStatus[] statuses = fileSysForUser().listStatus(convert(path));
+ FileStatus[] statuses = fileSystemForUser().listStatus(convert(path));
if (statuses == null)
throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
@@ -360,13 +370,13 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) {
- return new HadoopIgfsSecondaryFileSystemPositionedReadable(fileSysForUser(), convert(path), bufSize);
+ return new HadoopIgfsSecondaryFileSystemPositionedReadable(fileSystemForUser(), convert(path), bufSize);
}
/** {@inheritDoc} */
@Override public OutputStream create(IgfsPath path, boolean overwrite) {
try {
- return fileSysForUser().create(convert(path), overwrite);
+ return fileSystemForUser().create(convert(path), overwrite);
}
catch (IOException e) {
throw handleSecondaryFsError(e, "Failed to create file [path=" + path + ", overwrite=" + overwrite + "]");
@@ -380,8 +390,8 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
new HadoopIgfsProperties(props != null ? props : Collections.<String, String>emptyMap());
try {
- return fileSysForUser().create(convert(path), props0.permission(), overwrite, bufSize,
- (short)replication, blockSize, null);
+ return fileSystemForUser().create(convert(path), props0.permission(), overwrite, bufSize,
+ (short) replication, blockSize, null);
}
catch (IOException e) {
throw handleSecondaryFsError(e, "Failed to create file [path=" + path + ", props=" + props +
@@ -394,7 +404,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
@Override public OutputStream append(IgfsPath path, int bufSize, boolean create,
@Nullable Map<String, String> props) {
try {
- return fileSysForUser().append(convert(path), bufSize);
+ return fileSystemForUser().append(convert(path), bufSize);
}
catch (IOException e) {
throw handleSecondaryFsError(e, "Failed to append file [path=" + path + ", bufSize=" + bufSize + "]");
@@ -404,7 +414,7 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
/** {@inheritDoc} */
@Override public IgfsFile info(final IgfsPath path) {
try {
- final FileStatus status = fileSysForUser().getFileStatus(convert(path));
+ final FileStatus status = fileSystemForUser().getFileStatus(convert(path));
if (status == null)
return null;
@@ -479,65 +489,61 @@ public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSys
try {
// We don't use FileSystem#getUsed() since it counts only the files
// in the filesystem root, not all the files recursively.
- return fileSysForUser().getContentSummary(new Path("/")).getSpaceConsumed();
+ return fileSystemForUser().getContentSummary(new Path("/")).getSpaceConsumed();
}
catch (IOException e) {
throw handleSecondaryFsError(e, "Failed to get used space size of file system.");
}
}
- /** {@inheritDoc} */
- @Override public Map<String, String> properties() {
- return props;
- }
-
- /** {@inheritDoc} */
- @Override public void close() throws IgniteException {
- Exception e = null;
-
- try {
- dfltFs.close();
- }
- catch (Exception e0) {
- e = e0;
- }
-
- try {
- fileSysLazyMap.close();
- }
- catch (IgniteCheckedException ice) {
- if (e == null)
- e = ice;
- }
-
- if (e != null)
- throw new IgniteException(e);
- }
-
/**
* Gets the underlying {@link FileSystem}.
* This method is used solely for testing.
* @return the underlying Hadoop {@link FileSystem}.
*/
public FileSystem fileSystem() {
- return fileSysForUser();
+ return fileSystemForUser();
}
/**
* Gets the FileSystem for the current context user.
* @return the FileSystem instance, never null.
*/
- private FileSystem fileSysForUser() {
+ private FileSystem fileSystemForUser() {
String user = IgfsUserContext.currentUser();
if (F.isEmpty(user))
- user = dfltUserName; // default is never empty.
+ user = IgfsUtils.fixUserName(dfltUsrName);
assert !F.isEmpty(user);
- if (F.eq(user, dfltUserName))
- return dfltFs; // optimization
+ try {
+ return fsFactory.get(user);
+ }
+ catch (IOException ioe) {
+ throw new IgniteException(ioe);
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override public void start() throws IgniteException {
+ dfltUsrName = IgfsUtils.fixUserName(dfltUsrName);
+
+ if (fsFactory == null)
+ fsFactory = new CachingHadoopFileSystemFactory();
+
+ if (fsFactory instanceof LifecycleAware)
+ ((LifecycleAware) fsFactory).start();
+ }
- return fileSysLazyMap.getOrCreate(user);
+ /** {@inheritDoc} */
+ @Override public void stop() throws IgniteException {
+ if (fsFactory instanceof LifecycleAware)
+ ((LifecycleAware)fsFactory).stop();
+ }
+
+ /** {@inheritDoc} */
+ @Override public HadoopFileSystemFactory getPayload() {
+ return fsFactory;
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
index 5dce67f..71f6435 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
@@ -17,19 +17,6 @@
package org.apache.ignite.hadoop.fs.v1;
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
@@ -43,7 +30,9 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
+import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
import org.apache.ignite.igfs.IgfsBlockLocation;
import org.apache.ignite.igfs.IgfsException;
import org.apache.ignite.igfs.IgfsFile;
@@ -51,7 +40,6 @@ import org.apache.ignite.igfs.IgfsMode;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.igfs.IgfsPathSummary;
import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.processors.hadoop.SecondaryFileSystemProvider;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsInputStream;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyInputStream;
@@ -68,8 +56,23 @@ import org.apache.ignite.internal.util.typedef.X;
import org.apache.ignite.internal.util.typedef.internal.A;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleAware;
import org.jetbrains.annotations.Nullable;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_BATCH_SIZE;
import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_DIR;
import static org.apache.ignite.igfs.IgfsMode.PROXY;
@@ -85,8 +88,6 @@ import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_GROUP_NAME;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_PERMISSION;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_PREFER_LOCAL_WRITES;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_USER_NAME;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_CONFIG_PATH;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_URI;
/**
* {@code IGFS} Hadoop 1.x file system driver over file system API. To use
@@ -165,8 +166,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
/** IGFS mode resolver. */
private IgfsModeResolver modeRslvr;
- /** Secondary file system instance. */
- private FileSystem secondaryFs;
+ /** The secondary file system factory. */
+ private HadoopFileSystemFactory factory;
/** Management connection flag. */
private boolean mgmt;
@@ -327,21 +328,28 @@ public class IgniteHadoopFileSystem extends FileSystem {
}
if (initSecondary) {
- Map<String, String> props = paths.properties();
+ try {
+ factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
+ }
+ catch (IgniteCheckedException e) {
+ throw new IOException("Failed to get secondary file system factory.", e);
+ }
+
+ assert factory != null;
- String secUri = props.get(SECONDARY_FS_URI);
- String secConfPath = props.get(SECONDARY_FS_CONFIG_PATH);
+ if (factory instanceof LifecycleAware)
+ ((LifecycleAware) factory).start();
try {
- SecondaryFileSystemProvider secProvider = new SecondaryFileSystemProvider(secUri, secConfPath);
+ FileSystem secFs = factory.get(user);
- secondaryFs = secProvider.createFileSystem(user);
+ secondaryUri = secFs.getUri();
- secondaryUri = secProvider.uri();
+ A.ensure(secondaryUri != null, "Secondary file system uri should not be null.");
}
catch (IOException e) {
if (!mgmt)
- throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
+ throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
else
LOG.warn("Visor failed to create secondary file system (operations on paths with PROXY mode " +
"will have no effect): " + e.getMessage());
@@ -409,8 +417,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
if (clientLog.isLogEnabled())
clientLog.close();
- if (secondaryFs != null)
- U.closeQuiet(secondaryFs);
+ if (factory instanceof LifecycleAware)
+ ((LifecycleAware) factory).stop();
// Reset initialized resources.
uri = null;
@@ -425,6 +433,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
A.notNull(p, "p");
if (mode(p) == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -453,6 +463,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
A.notNull(p, "p");
if (mode(p) == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -482,6 +494,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
try {
if (mode(p) == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -490,8 +504,7 @@ public class IgniteHadoopFileSystem extends FileSystem {
}
secondaryFs.setOwner(toSecondary(p), username, grpName);
- }
- else if (rmtClient.update(convert(p), F.asMap(PROP_USER_NAME, username, PROP_GROUP_NAME, grpName)) == null)
+ } else if (rmtClient.update(convert(p), F.asMap(PROP_USER_NAME, username, PROP_GROUP_NAME, grpName)) == null)
throw new IOException("Failed to set file permission (file not found?)" +
" [path=" + p + ", userName=" + username + ", groupName=" + grpName + ']');
}
@@ -511,6 +524,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
IgfsMode mode = mode(path);
if (mode == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -583,6 +598,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
if (mode == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -664,6 +681,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
", path=" + path + ", bufSize=" + bufSize + ']');
if (mode == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -727,6 +746,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
IgfsMode mode = mode(srcPath);
if (mode == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -787,6 +808,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
IgfsMode mode = mode(path);
if (mode == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -832,6 +855,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
IgfsMode mode = mode(path);
if (mode == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -896,26 +921,35 @@ public class IgniteHadoopFileSystem extends FileSystem {
/** {@inheritDoc} */
@Override public void setWorkingDirectory(Path newPath) {
- if (newPath == null) {
- Path homeDir = getHomeDirectory();
+ try {
+ if (newPath == null) {
+ Path homeDir = getHomeDirectory();
- if (secondaryFs != null)
- secondaryFs.setWorkingDirectory(toSecondary(homeDir));
+ FileSystem secondaryFs = secondaryFileSystem();
- workingDir = homeDir;
- }
- else {
- Path fixedNewPath = fixRelativePart(newPath);
+ if (secondaryFs != null)
+ secondaryFs.setWorkingDirectory(toSecondary(homeDir));
+
+ workingDir = homeDir;
+ }
+ else {
+ Path fixedNewPath = fixRelativePart(newPath);
- String res = fixedNewPath.toUri().getPath();
+ String res = fixedNewPath.toUri().getPath();
- if (!DFSUtil.isValidName(res))
- throw new IllegalArgumentException("Invalid DFS directory name " + res);
+ if (!DFSUtil.isValidName(res))
+ throw new IllegalArgumentException("Invalid DFS directory name " + res);
- if (secondaryFs != null)
- secondaryFs.setWorkingDirectory(toSecondary(fixedNewPath));
+ FileSystem secondaryFs = secondaryFileSystem();
- workingDir = fixedNewPath;
+ if (secondaryFs != null)
+ secondaryFs.setWorkingDirectory(toSecondary(fixedNewPath));
+
+ workingDir = fixedNewPath;
+ }
+ }
+ catch (IOException e) {
+ throw new RuntimeException("Failed to obtain secondary file system instance.", e);
}
}
@@ -936,6 +970,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
IgfsMode mode = mode(path);
if (mode == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -977,6 +1013,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
try {
if (mode(f) == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -1007,6 +1045,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
try {
if (mode(f) == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -1038,6 +1078,8 @@ public class IgniteHadoopFileSystem extends FileSystem {
IgfsPath path = convert(status.getPath());
if (mode(status.getPath()) == PROXY) {
+ final FileSystem secondaryFs = secondaryFileSystem();
+
if (secondaryFs == null) {
assert mgmt;
@@ -1103,7 +1145,7 @@ public class IgniteHadoopFileSystem extends FileSystem {
* @return {@code true} If secondary file system is initialized.
*/
public boolean hasSecondaryFileSystem() {
- return secondaryFs != null;
+ return factory != null;
}
/**
@@ -1123,7 +1165,7 @@ public class IgniteHadoopFileSystem extends FileSystem {
* @return Secondary file system path.
*/
private Path toSecondary(Path path) {
- assert secondaryFs != null;
+ assert factory != null;
assert secondaryUri != null;
return convertPath(path, secondaryUri);
@@ -1298,4 +1340,16 @@ public class IgniteHadoopFileSystem extends FileSystem {
public String user() {
return user;
}
+
+ /**
+ * Gets cached or creates a {@link FileSystem}.
+ *
+ * @return The secondary file system.
+ */
+ private @Nullable FileSystem secondaryFileSystem() throws IOException{
+ if (factory == null)
+ return null;
+
+ return factory.get(user);
+ }
}
\ No newline at end of file
[02/11] ignite git commit: Merge remote-tracking branch
'remotes/origin/ignite-1.5' into ignite-1.5.1-2
Posted by vo...@apache.org.
Merge remote-tracking branch 'remotes/origin/ignite-1.5' into ignite-1.5.1-2
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/cca90c77
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/cca90c77
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/cca90c77
Branch: refs/heads/ignite-2314
Commit: cca90c77f56db5d96f99610ca8dc48f6066ac1b3
Parents: 8e6b365 f1f8cda
Author: Anton Vinogradov <av...@apache.org>
Authored: Wed Dec 30 09:57:01 2015 +0300
Committer: Anton Vinogradov <av...@apache.org>
Committed: Wed Dec 30 09:57:01 2015 +0300
----------------------------------------------------------------------
assembly/release-fabric-base.xml | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
[10/11] ignite git commit: Merge branch 'ignite-1.5.1-2'
Posted by vo...@apache.org.
Merge branch 'ignite-1.5.1-2'
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/10012b4e
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/10012b4e
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/10012b4e
Branch: refs/heads/ignite-2314
Commit: 10012b4ef96f2ae727dc27c28185938d016ab9e3
Parents: 8ed73b4 cca90c7
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Tue Jan 5 11:03:06 2016 +0400
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Tue Jan 5 11:03:06 2016 +0400
----------------------------------------------------------------------
assembly/release-fabric-base.xml | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
[11/11] ignite git commit: Merge branch 'master' into ignite-2314
Posted by vo...@apache.org.
Merge branch 'master' into ignite-2314
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/58c045a6
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/58c045a6
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/58c045a6
Branch: refs/heads/ignite-2314
Commit: 58c045a6ecc23aec6166bcf443264cbb61800b99
Parents: 51cdfd4 10012b4
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Mon Jan 11 18:28:24 2016 +0300
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Mon Jan 11 18:28:24 2016 +0300
----------------------------------------------------------------------
assembly/release-fabric-base.xml | 1 +
.../org/apache/ignite/igfs/IgfsUserContext.java | 16 +-
.../igfs/secondary/IgfsSecondaryFileSystem.java | 14 -
.../apache/ignite/internal/IgniteKernal.java | 27 +-
.../internal/cluster/ClusterGroupAdapter.java | 10 +-
.../discovery/GridDiscoveryManager.java | 19 +-
.../loadbalancer/GridLoadBalancerManager.java | 8 +-
.../processors/cache/GridCacheAdapter.java | 14 +-
.../processors/cache/GridCacheContext.java | 3 +-
.../cache/GridCacheEvictionManager.java | 5 +-
.../processors/cache/GridCacheIoManager.java | 5 +-
.../processors/cache/GridCacheIterator.java | 4 +-
.../processors/cache/GridCacheKeySet.java | 2 +-
.../GridCachePartitionExchangeManager.java | 12 +-
.../processors/cache/GridCacheProcessor.java | 9 +-
.../processors/cache/GridCacheSwapManager.java | 7 +-
.../processors/cache/GridCacheUtils.java | 92 -
.../cache/GridCacheValueCollection.java | 5 +-
.../processors/cache/IgniteCacheProxy.java | 4 +-
.../dht/GridClientPartitionTopology.java | 3 +-
.../dht/GridDhtPartitionTopologyImpl.java | 3 +-
.../dht/GridDhtTransactionalCacheAdapter.java | 3 +-
.../distributed/dht/GridDhtTxLocalAdapter.java | 3 +-
.../distributed/dht/GridDhtTxPrepareFuture.java | 5 +-
.../dht/atomic/GridDhtAtomicCache.java | 21 +-
.../dht/preloader/GridDhtForceKeysFuture.java | 8 +-
.../distributed/near/GridNearCacheAdapter.java | 11 +-
.../local/atomic/GridLocalAtomicCache.java | 17 +-
.../GridCacheAtomicStampedImpl.java | 5 +-
.../processors/hadoop/HadoopPayloadAware.java | 28 +
.../ignite/internal/processors/igfs/IgfsEx.java | 13 -
.../internal/processors/igfs/IgfsImpl.java | 16 +-
.../internal/processors/igfs/IgfsPaths.java | 62 +-
.../internal/processors/igfs/IgfsProcessor.java | 8 +-
.../igfs/IgfsSecondaryFileSystemImpl.java | 11 -
.../dotnet/PlatformDotNetCacheStore.java | 4 +-
.../top/GridTopologyCommandHandler.java | 3 +-
.../org/apache/ignite/internal/util/F0.java | 325 +++-
.../internal/util/GridExecutionStatistics.java | 4 +-
.../ignite/internal/util/IgniteUtils.java | 4 +-
.../ignite/internal/util/lang/GridFunc.java | 1764 ++++++++++++++----
.../ignite/internal/util/lang/GridTuple3.java | 1 +
.../ignite/internal/util/lang/GridTuple4.java | 1 +
.../ignite/internal/util/lang/GridTuple5.java | 1 +
.../ignite/internal/util/lang/GridTuple6.java | 1 +
.../ignite/internal/util/lang/GridTupleV.java | 1 +
.../visor/node/VisorIgfsConfiguration.java | 43 -
.../java/org/apache/ignite/lang/IgniteUuid.java | 2 +-
.../memory/MemoryEventStorageSpi.java | 1 -
.../cache/GridCacheLuceneQueryIndexTest.java | 4 +-
.../distributed/GridCacheEventAbstractTest.java | 2 +-
.../processors/igfs/IgfsAbstractSelfTest.java | 8 +-
.../igfs/IgfsExUniversalFileSystemAdapter.java | 11 +-
.../igfs/UniversalFileSystemAdapter.java | 5 +-
.../IpcSharedMemoryCrashDetectionSelfTest.java | 16 +-
.../ignite/lang/GridBasicPerformanceTest.java | 10 +-
.../ignite/lang/GridFuncPerformanceTest.java | 102 +
.../ignite/loadtest/GridLoadTestStatistics.java | 2 +-
.../hadoop/fs/BasicHadoopFileSystemFactory.java | 209 +++
.../fs/CachingHadoopFileSystemFactory.java | 86 +
.../hadoop/fs/HadoopFileSystemFactory.java | 52 +
.../fs/IgniteHadoopIgfsSecondaryFileSystem.java | 264 +--
.../hadoop/fs/v1/IgniteHadoopFileSystem.java | 144 +-
.../hadoop/fs/v2/IgniteHadoopFileSystem.java | 115 +-
.../processors/hadoop/HadoopClassLoader.java | 689 +++++--
.../hadoop/SecondaryFileSystemProvider.java | 139 --
.../hadoop/fs/HadoopFileSystemCacheUtils.java | 8 +-
.../hadoop/fs/HadoopLazyConcurrentMap.java | 5 +-
.../hadoop/v2/HadoopNativeCodeLoader.java | 74 -
.../ignite/igfs/Hadoop1DualAbstractTest.java | 14 +-
.../igfs/HadoopFIleSystemFactorySelfTest.java | 326 ++++
...oopFileSystemUniversalFileSystemAdapter.java | 53 +-
...oopSecondaryFileSystemConfigurationTest.java | 27 +-
.../IgniteHadoopFileSystemAbstractSelfTest.java | 71 +-
.../hadoop/HadoopAbstractWordCountTest.java | 46 +-
.../hadoop/HadoopClassLoaderTest.java | 101 +-
.../hadoop/HadoopMapReduceEmbeddedSelfTest.java | 2 +-
.../processors/hadoop/HadoopMapReduceTest.java | 15 +-
.../hadoop/HadoopSnappyFullMapReduceTest.java | 28 +
.../processors/hadoop/HadoopSnappyTest.java | 102 +
.../processors/hadoop/HadoopTasksV2Test.java | 2 +-
.../hadoop/deps/CircularWIthHadoop.java | 32 +
.../hadoop/deps/CircularWithoutHadoop.java | 27 +
.../processors/hadoop/deps/WithCast.java | 41 +
.../hadoop/deps/WithClassAnnotation.java | 28 +
.../hadoop/deps/WithConstructorInvocation.java | 31 +
.../processors/hadoop/deps/WithExtends.java | 27 +
.../processors/hadoop/deps/WithField.java | 29 +
.../processors/hadoop/deps/WithImplements.java | 36 +
.../hadoop/deps/WithIndirectField.java | 27 +
.../processors/hadoop/deps/WithInitializer.java | 33 +
.../processors/hadoop/deps/WithInnerClass.java | 31 +
.../hadoop/deps/WithLocalVariable.java | 38 +
.../hadoop/deps/WithMethodAnnotation.java | 32 +
.../hadoop/deps/WithMethodArgument.java | 31 +
.../hadoop/deps/WithMethodCheckedException.java | 31 +
.../hadoop/deps/WithMethodInvocation.java | 31 +
.../hadoop/deps/WithMethodReturnType.java | 31 +
.../hadoop/deps/WithMethodRuntimeException.java | 31 +
.../processors/hadoop/deps/WithOuterClass.java | 38 +
.../hadoop/deps/WithParameterAnnotation.java | 31 +
.../processors/hadoop/deps/WithStaticField.java | 29 +
.../hadoop/deps/WithStaticInitializer.java | 34 +
.../processors/hadoop/deps/Without.java | 25 +
.../hadoop/examples/HadoopWordCount1Reduce.java | 1 +
.../hadoop/examples/HadoopWordCount2.java | 18 +-
.../examples/HadoopWordCount2Reducer.java | 1 +
.../testsuites/IgniteHadoopTestSuite.java | 24 +-
.../query/h2/opt/GridH2TreeIndex.java | 4 +-
109 files changed, 4729 insertions(+), 1404 deletions(-)
----------------------------------------------------------------------
[07/11] ignite git commit: IGNITE-2218: Fixed a problem with native
Hadoop libraries load. This closes #378.
Posted by vo...@apache.org.
IGNITE-2218: Fixed a problem with native Hadoop libraries load. This closes #378.
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/7d58d14a
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/7d58d14a
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/7d58d14a
Branch: refs/heads/ignite-2314
Commit: 7d58d14a80b1c32f88fbb4cf68e5d289c5aee474
Parents: 012ca73
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Mon Jan 4 12:14:58 2016 +0400
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Mon Jan 4 12:14:58 2016 +0400
----------------------------------------------------------------------
.../processors/hadoop/HadoopClassLoader.java | 71 ++++++++++---
.../hadoop/v2/HadoopNativeCodeLoader.java | 74 --------------
.../hadoop/HadoopAbstractWordCountTest.java | 46 +++++++--
.../hadoop/HadoopMapReduceEmbeddedSelfTest.java | 2 +-
.../processors/hadoop/HadoopMapReduceTest.java | 15 ++-
.../hadoop/HadoopSnappyFullMapReduceTest.java | 28 +++++
.../processors/hadoop/HadoopSnappyTest.java | 102 +++++++++++++++++++
.../processors/hadoop/HadoopTasksV2Test.java | 2 +-
.../hadoop/examples/HadoopWordCount1Reduce.java | 1 +
.../hadoop/examples/HadoopWordCount2.java | 18 +++-
.../examples/HadoopWordCount2Reducer.java | 1 +
.../testsuites/IgniteHadoopTestSuite.java | 18 +++-
12 files changed, 279 insertions(+), 99 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
index 735133f..270b31d 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
@@ -30,13 +30,14 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
-
+import java.util.Vector;
+import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.hadoop.v2.HadoopDaemon;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopNativeCodeLoader;
import org.apache.ignite.internal.processors.hadoop.v2.HadoopShutdownHookManager;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
import org.jetbrains.annotations.Nullable;
import org.jsr166.ConcurrentHashMap8;
import org.objectweb.asm.AnnotationVisitor;
@@ -69,6 +70,9 @@ public class HadoopClassLoader extends URLClassLoader {
/** Name of the Hadoop daemon class. */
public static final String HADOOP_DAEMON_CLASS_NAME = "org.apache.hadoop.util.Daemon";
+ /** Name of libhadoop library. */
+ private static final String LIBHADOOP = "hadoop.";
+
/** */
private static final URLClassLoader APP_CLS_LDR = (URLClassLoader)HadoopClassLoader.class.getClassLoader();
@@ -119,6 +123,51 @@ public class HadoopClassLoader extends URLClassLoader {
assert !(getParent() instanceof HadoopClassLoader);
this.name = name;
+
+ initializeNativeLibraries();
+ }
+
+ /**
+ * Workaround to load native Hadoop libraries. Java doesn't allow native libraries to be loaded from different
+ * classloaders. But we load Hadoop classes many times and one of these classes - {@code NativeCodeLoader} - tries
+ * to load the same native library over and over again.
+ * <p>
+ * To fix the problem, we force native library load in parent class loader and then "link" handle to this native
+ * library to our class loader. As a result, our class loader will think that the library is already loaded and will
+ * be able to link native methods.
+ *
+ * @see <a href="http://docs.oracle.com/javase/1.5.0/docs/guide/jni/spec/invocation.html#library_version">
+ * JNI specification</a>
+ */
+ private void initializeNativeLibraries() {
+ try {
+ // This must trigger native library load.
+ Class.forName(NativeCodeLoader.class.getName(), true, APP_CLS_LDR);
+
+ final Vector<Object> curVector = U.field(this, "nativeLibraries");
+
+ ClassLoader ldr = APP_CLS_LDR;
+
+ while (ldr != null) {
+ Vector vector = U.field(ldr, "nativeLibraries");
+
+ for (Object lib : vector) {
+ String libName = U.field(lib, "name");
+
+ if (libName.contains(LIBHADOOP)) {
+ curVector.add(lib);
+
+ return;
+ }
+ }
+
+ ldr = ldr.getParent();
+ }
+ }
+ catch (Exception e) {
+ U.quietAndWarn(null, "Failed to initialize Hadoop native library " +
+ "(native Hadoop methods might not work properly): " + e);
+ }
}
/**
@@ -152,8 +201,6 @@ public class HadoopClassLoader extends URLClassLoader {
if (isHadoop(name)) { // Always load Hadoop classes explicitly, since Hadoop can be available in App classpath.
if (name.endsWith(".util.ShutdownHookManager")) // Dirty hack to get rid of Hadoop shutdown hooks.
return loadFromBytes(name, HadoopShutdownHookManager.class.getName());
- else if (name.endsWith(".util.NativeCodeLoader"))
- return loadFromBytes(name, HadoopNativeCodeLoader.class.getName());
else if (name.equals(HADOOP_DAEMON_CLASS_NAME))
// We replace this in order to be able to forcibly stop some daemon threads
// that otherwise never stop (e.g. PeerCache runnables):
@@ -274,7 +321,7 @@ public class HadoopClassLoader extends URLClassLoader {
/**
* Check whether class has external dependencies on Hadoop.
- *
+ *
* @param clsName Class name.
* @return {@code True} if class has external dependencies.
*/
@@ -285,15 +332,15 @@ public class HadoopClassLoader extends URLClassLoader {
ctx.mthdVisitor = new CollectingMethodVisitor(ctx, ctx.annVisitor);
ctx.fldVisitor = new CollectingFieldVisitor(ctx, ctx.annVisitor);
ctx.clsVisitor = new CollectingClassVisitor(ctx, ctx.annVisitor, ctx.mthdVisitor, ctx.fldVisitor);
-
+
return hasExternalDependencies(clsName, ctx);
}
-
+
/**
* Check whether class has external dependencies on Hadoop.
- *
+ *
* @param clsName Class name.
- * @param ctx Context.
+ * @param ctx Context.
* @return {@code true} If the class has external dependencies.
*/
boolean hasExternalDependencies(String clsName, CollectingContext ctx) {
@@ -519,7 +566,7 @@ public class HadoopClassLoader extends URLClassLoader {
/** Field visitor. */
private FieldVisitor fldVisitor;
-
+
/** Class visitor. */
private ClassVisitor clsVisitor;
@@ -627,7 +674,7 @@ public class HadoopClassLoader extends URLClassLoader {
onType(t);
}
}
- }
+ }
/**
* Annotation visitor.
@@ -638,7 +685,7 @@ public class HadoopClassLoader extends URLClassLoader {
/**
* Annotation visitor.
- *
+ *
* @param ctx The collector.
*/
CollectingAnnotationVisitor(CollectingContext ctx) {
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopNativeCodeLoader.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopNativeCodeLoader.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopNativeCodeLoader.java
deleted file mode 100644
index 4c4840d..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopNativeCodeLoader.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * A fake helper to load the native hadoop code i.e. libhadoop.so.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class HadoopNativeCodeLoader {
- /**
- * Check if native-hadoop code is loaded for this platform.
- *
- * @return <code>true</code> if native-hadoop is loaded,
- * else <code>false</code>
- */
- public static boolean isNativeCodeLoaded() {
- return false;
- }
-
- /**
- * Returns true only if this build was compiled with support for snappy.
- */
- public static boolean buildSupportsSnappy() {
- return false;
- }
-
- /**
- * @return Library name.
- */
- public static String getLibraryName() {
- throw new IllegalStateException();
- }
-
- /**
- * Return if native hadoop libraries, if present, can be used for this job.
- * @param conf configuration
- *
- * @return <code>true</code> if native hadoop libraries, if present, can be
- * used for this job; <code>false</code> otherwise.
- */
- public boolean getLoadNativeLibraries(Configuration conf) {
- return false;
- }
-
- /**
- * Set if native hadoop libraries, if present, can be used for this job.
- *
- * @param conf configuration
- * @param loadNativeLibraries can native hadoop libraries be loaded
- */
- public void setLoadNativeLibraries(Configuration conf, boolean loadNativeLibraries) {
- // No-op.
- }
-}
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
index a47eaf6..e45c127 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
@@ -19,6 +19,7 @@ package org.apache.ignite.internal.processors.hadoop;
import com.google.common.base.Joiner;
import java.io.BufferedReader;
+import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.util.ArrayList;
@@ -26,6 +27,11 @@ import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.internal.processors.igfs.IgfsEx;
@@ -118,21 +124,49 @@ public abstract class HadoopAbstractWordCountTest extends HadoopAbstractSelfTest
}
/**
+ * Read w/o decoding (default).
+ *
+ * @param fileName The file.
+ * @return The file contents, human-readable.
+ * @throws Exception On error.
+ */
+ protected String readAndSortFile(String fileName) throws Exception {
+ return readAndSortFile(fileName, null);
+ }
+
+ /**
* Reads whole text file into String.
*
* @param fileName Name of the file to read.
* @return Content of the file as String value.
* @throws Exception If could not read the file.
*/
- protected String readAndSortFile(String fileName) throws Exception {
- BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(new IgfsPath(fileName))));
+ protected String readAndSortFile(String fileName, Configuration conf) throws Exception {
+ final List<String> list = new ArrayList<>();
+
+ final boolean snappyDecode = conf != null && conf.getBoolean(FileOutputFormat.COMPRESS, false);
+
+ if (snappyDecode) {
+ try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
+ SequenceFile.Reader.file(new Path(fileName)))) {
+ Text key = new Text();
- List<String> list = new ArrayList<>();
+ IntWritable val = new IntWritable();
- String line;
+ while (reader.next(key, val))
+ list.add(key + "\t" + val);
+ }
+ }
+ else {
+ try (InputStream is0 = igfs.open(new IgfsPath(fileName))) {
+ BufferedReader reader = new BufferedReader(new InputStreamReader(is0));
+
+ String line;
- while ((line = reader.readLine()) != null)
- list.add(line);
+ while ((line = reader.readLine()) != null)
+ list.add(line);
+ }
+ }
Collections.sort(list);
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
index c0eff48..25ef382 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
@@ -106,7 +106,7 @@ public class HadoopMapReduceEmbeddedSelfTest extends HadoopMapReduceTest {
Job job = Job.getInstance(jobConf);
- HadoopWordCount2.setTasksClasses(job, useNewAPI, useNewAPI, useNewAPI);
+ HadoopWordCount2.setTasksClasses(job, useNewAPI, useNewAPI, useNewAPI, false);
if (useNewAPI) {
job.setPartitionerClass(CustomV2Partitioner.class);
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
index d0bd92b..7fd8272 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
@@ -183,7 +183,7 @@ public class HadoopMapReduceTest extends HadoopAbstractWordCountTest {
Job job = Job.getInstance(jobConf);
- HadoopWordCount2.setTasksClasses(job, useNewMapper, useNewCombiner, useNewReducer);
+ HadoopWordCount2.setTasksClasses(job, useNewMapper, useNewCombiner, useNewReducer, compressOutputSnappy());
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
@@ -207,18 +207,29 @@ public class HadoopMapReduceTest extends HadoopAbstractWordCountTest {
checkOwner(new IgfsPath(outFile));
+ String actual = readAndSortFile(outFile, job.getConfiguration());
+
assertEquals("Use new mapper: " + useNewMapper + ", new combiner: " + useNewCombiner + ", new reducer: " +
useNewReducer,
"blue\t" + blue + "\n" +
"green\t" + green + "\n" +
"red\t" + red + "\n" +
"yellow\t" + yellow + "\n",
- readAndSortFile(outFile)
+ actual
);
}
}
/**
+ * Gets if to compress output data with Snappy.
+ *
+ * @return If to compress output data with Snappy.
+ */
+ protected boolean compressOutputSnappy() {
+ return false;
+ }
+
+ /**
* Simple test job statistics.
*
* @param jobId Job id.
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java
new file mode 100644
index 0000000..22d33a5
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+/**
+ * Same test as HadoopMapReduceTest, but with enabled Snappy output compression.
+ */
+public class HadoopSnappyFullMapReduceTest extends HadoopMapReduceTest {
+ /** {@inheritDoc} */
+ @Override protected boolean compressOutputSnappy() {
+ return true;
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java
new file mode 100644
index 0000000..014ff1e
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.util.Arrays;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.SnappyCodec;
+import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Tests isolated Hadoop Snappy codec usage.
+ */
+public class HadoopSnappyTest extends GridCommonAbstractTest {
+ /** Length of data. */
+ private static final int BYTE_SIZE = 1024 * 50;
+
+ /**
+ * Checks Snappy codec usage.
+ *
+ * @throws Exception On error.
+ */
+ public void testSnappy() throws Throwable {
+ // Run Snappy test in default class loader:
+ checkSnappy();
+
+ // Run the same in several more class loaders simulating jobs and tasks:
+ for (int i = 0; i < 2; i++) {
+ ClassLoader hadoopClsLdr = new HadoopClassLoader(null, "cl-" + i);
+
+ Class<?> cls = (Class)Class.forName(HadoopSnappyTest.class.getName(), true, hadoopClsLdr);
+
+ assertEquals(hadoopClsLdr, cls.getClassLoader());
+
+ U.invoke(cls, null, "checkSnappy");
+ }
+ }
+
+ /**
+ * Internal check routine.
+ *
+ * @throws Throwable If failed.
+ */
+ public static void checkSnappy() throws Throwable {
+ try {
+ byte[] expBytes = new byte[BYTE_SIZE];
+ byte[] actualBytes = new byte[BYTE_SIZE];
+
+ for (int i = 0; i < expBytes.length ; i++)
+ expBytes[i] = (byte)ThreadLocalRandom.current().nextInt(16);
+
+ SnappyCodec codec = new SnappyCodec();
+
+ codec.setConf(new Configuration());
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+
+ try (CompressionOutputStream cos = codec.createOutputStream(baos)) {
+ cos.write(expBytes);
+ cos.flush();
+ }
+
+ try (CompressionInputStream cis = codec.createInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
+ int read = cis.read(actualBytes, 0, actualBytes.length);
+
+ assert read == actualBytes.length;
+ }
+
+ assert Arrays.equals(expBytes, actualBytes);
+ }
+ catch (Throwable e) {
+ System.out.println("Snappy check failed:");
+ System.out.println("### NativeCodeLoader.isNativeCodeLoaded: " + NativeCodeLoader.isNativeCodeLoaded());
+ System.out.println("### SnappyCompressor.isNativeCodeLoaded: " + SnappyCompressor.isNativeCodeLoaded());
+
+ throw e;
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java
index 3a964d6..d125deb 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java
@@ -48,7 +48,7 @@ public class HadoopTasksV2Test extends HadoopTasksAllVersionsTest {
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
- HadoopWordCount2.setTasksClasses(job, true, true, true);
+ HadoopWordCount2.setTasksClasses(job, true, true, true, false);
Configuration conf = job.getConfiguration();
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java
index 120ac19..2335911 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java
@@ -47,6 +47,7 @@ public class HadoopWordCount1Reduce extends MapReduceBase implements Reducer<Tex
output.collect(key, new IntWritable(sum));
}
+ /** {@inheritDoc} */
@Override public void configure(JobConf job) {
super.configure(job);
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java
index 942a908..4b508ca 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java
@@ -20,11 +20,14 @@ package org.apache.ignite.internal.processors.hadoop.examples;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
/**
@@ -62,7 +65,7 @@ public class HadoopWordCount2 {
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
- setTasksClasses(job, true, true, true);
+ setTasksClasses(job, true, true, true, false);
FileInputFormat.setInputPaths(job, new Path(input));
FileOutputFormat.setOutputPath(job, new Path(output));
@@ -80,7 +83,8 @@ public class HadoopWordCount2 {
* @param setCombiner Option to set combiner class.
* @param setReducer Option to set reducer and output format classes.
*/
- public static void setTasksClasses(Job job, boolean setMapper, boolean setCombiner, boolean setReducer) {
+ public static void setTasksClasses(Job job, boolean setMapper, boolean setCombiner, boolean setReducer,
+ boolean outputCompression) {
if (setMapper) {
job.setMapperClass(HadoopWordCount2Mapper.class);
job.setInputFormatClass(TextInputFormat.class);
@@ -93,5 +97,15 @@ public class HadoopWordCount2 {
job.setReducerClass(HadoopWordCount2Reducer.class);
job.setOutputFormatClass(TextOutputFormat.class);
}
+
+ if (outputCompression) {
+ job.setOutputFormatClass(SequenceFileOutputFormat.class);
+
+ SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
+
+ SequenceFileOutputFormat.setCompressOutput(job, true);
+
+ job.getConfiguration().set(FileOutputFormat.COMPRESS_CODEC, SnappyCodec.class.getName());
+ }
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java
index b2be53e..63a9d95 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java
@@ -55,6 +55,7 @@ public class HadoopWordCount2Reducer extends Reducer<Text, IntWritable, Text, In
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
super.setup(context);
+
wasSetUp = true;
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/7d58d14a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
index 1831085..6c542b5 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
@@ -25,6 +25,8 @@ import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.List;
import junit.framework.TestSuite;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
@@ -63,6 +65,7 @@ import org.apache.ignite.internal.processors.hadoop.HadoopJobTrackerSelfTest;
import org.apache.ignite.internal.processors.hadoop.HadoopMapReduceEmbeddedSelfTest;
import org.apache.ignite.internal.processors.hadoop.HadoopMapReduceTest;
import org.apache.ignite.internal.processors.hadoop.HadoopSerializationWrapperSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopSnappyTest;
import org.apache.ignite.internal.processors.hadoop.HadoopSortingTest;
import org.apache.ignite.internal.processors.hadoop.HadoopSplitWrapperSelfTest;
import org.apache.ignite.internal.processors.hadoop.HadoopTaskExecutionSelfTest;
@@ -70,6 +73,7 @@ import org.apache.ignite.internal.processors.hadoop.HadoopTasksV1Test;
import org.apache.ignite.internal.processors.hadoop.HadoopTasksV2Test;
import org.apache.ignite.internal.processors.hadoop.HadoopV2JobSelfTest;
import org.apache.ignite.internal.processors.hadoop.HadoopValidationSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopSnappyFullMapReduceTest;
import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopConcurrentHashMultimapSelftest;
import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopHashMapSelfTest;
import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopSkipListSelfTest;
@@ -96,6 +100,9 @@ public class IgniteHadoopTestSuite extends TestSuite {
TestSuite suite = new TestSuite("Ignite Hadoop MR Test Suite");
+ suite.addTest(new TestSuite(ldr.loadClass(HadoopSnappyTest.class.getName())));
+ suite.addTest(new TestSuite(ldr.loadClass(HadoopSnappyFullMapReduceTest.class.getName())));
+
suite.addTest(new TestSuite(ldr.loadClass(HadoopClassLoaderTest.class.getName())));
suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfs20FileSystemLoopbackPrimarySelfTest.class.getName())));
@@ -192,7 +199,7 @@ public class IgniteHadoopTestSuite extends TestSuite {
X.println("Will use Hadoop version: " + ver);
- String downloadPath = "hadoop/common/hadoop-" + ver + "/hadoop-" + ver + ".tar.gz";
+ String downloadPath = "hadoop/core/hadoop-" + ver + "/hadoop-" + ver + ".tar.gz";
download("Hadoop", "HADOOP_HOME", downloadPath, "hadoop-" + ver);
}
@@ -217,6 +224,7 @@ public class IgniteHadoopTestSuite extends TestSuite {
}
List<String> urls = F.asList(
+ "http://archive.apache.org/dist/",
"http://apache-mirror.rbc.ru/pub/apache/",
"http://www.eu.apache.org/dist/",
"http://www.us.apache.org/dist/");
@@ -273,6 +281,14 @@ public class IgniteHadoopTestSuite extends TestSuite {
if (!dest.mkdirs())
throw new IllegalStateException();
}
+ else if (entry.isSymbolicLink()) {
+ // Important: in Hadoop installation there are symlinks, we need to create them:
+ Path theLinkItself = Paths.get(install.getAbsolutePath(), entry.getName());
+
+ Path linkTarget = Paths.get(entry.getLinkName());
+
+ Files.createSymbolicLink(theLinkItself, linkTarget);
+ }
else {
File parent = dest.getParentFile();
[04/11] ignite git commit: Revert "IGNITE-2330: Simplified GridFunc."
Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridFunc.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridFunc.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridFunc.java
index 015be5c..0678657 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridFunc.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/GridFunc.java
@@ -35,10 +35,16 @@ import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import javax.cache.Cache;
+import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJobResult;
+import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridConcurrentHashSet;
import org.apache.ignite.internal.util.GridEmptyIterator;
@@ -46,6 +52,7 @@ import org.apache.ignite.internal.util.GridLeanMap;
import org.apache.ignite.internal.util.GridLeanSet;
import org.apache.ignite.internal.util.GridSerializableCollection;
import org.apache.ignite.internal.util.GridSerializableIterator;
+import org.apache.ignite.internal.util.GridSerializableList;
import org.apache.ignite.internal.util.GridSerializableMap;
import org.apache.ignite.internal.util.GridSerializableSet;
import org.apache.ignite.internal.util.typedef.C1;
@@ -61,6 +68,7 @@ import org.apache.ignite.lang.IgniteBiTuple;
import org.apache.ignite.lang.IgniteCallable;
import org.apache.ignite.lang.IgniteClosure;
import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteOutClosure;
import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.lang.IgniteReducer;
import org.jetbrains.annotations.NotNull;
@@ -144,6 +152,13 @@ public class GridFunc {
};
/** */
+ public static final IgnitePredicate<Object> IS_NULL = new P1<Object>() {
+ @Override public boolean apply(Object o) {
+ return o == null;
+ }
+ };
+
+ /** */
public static final IgnitePredicate<Object> IS_NOT_NULL = new P1<Object>() {
@Override public boolean apply(Object o) {
return o != null;
@@ -151,6 +166,28 @@ public class GridFunc {
};
/** */
+ private static final IgniteCallable<?> LIST_FACTORY = new IgniteCallable<List>() {
+ @Override public List call() {
+ return new ArrayList();
+ }
+
+ @Override public String toString() {
+ return "Array list factory.";
+ }
+ };
+
+ /** */
+ private static final IgniteCallable<?> LINKED_LIST_FACTORY = new IgniteCallable<LinkedList>() {
+ @Override public LinkedList call() {
+ return new LinkedList();
+ }
+
+ @Override public String toString() {
+ return "Linked list factory.";
+ }
+ };
+
+ /** */
private static final IgniteCallable<?> SET_FACTORY = new IgniteCallable<Set>() {
@Override public Set call() {
return new HashSet();
@@ -162,6 +199,61 @@ public class GridFunc {
};
/** */
+ private static final IgniteCallable<AtomicInteger> ATOMIC_INT_FACTORY = new IgniteCallable<AtomicInteger>() {
+ @Override public AtomicInteger call() {
+ return new AtomicInteger(0);
+ }
+
+ @Override public String toString() {
+ return "Atomic integer factory.";
+ }
+ };
+
+ /** */
+ private static final IgniteCallable<AtomicLong> ATOMIC_LONG_FACTORY = new IgniteCallable<AtomicLong>() {
+ @Override public AtomicLong call() {
+ return new AtomicLong(0);
+ }
+
+ @Override public String toString() {
+ return "Atomic long factory.";
+ }
+ };
+
+ /** */
+ private static final IgniteCallable<AtomicBoolean> ATOMIC_BOOL_FACTORY = new IgniteCallable<AtomicBoolean>() {
+ @Override public AtomicBoolean call() {
+ return new AtomicBoolean();
+ }
+
+ @Override public String toString() {
+ return "Atomic boolean factory.";
+ }
+ };
+
+ /** */
+ private static final IgniteCallable<?> ATOMIC_REF_FACTORY = new IgniteCallable<AtomicReference>() {
+ @Override public AtomicReference call() {
+ return new AtomicReference();
+ }
+
+ @Override public String toString() {
+ return "Atomic reference factory.";
+ }
+ };
+
+ /** */
+ private static final IgniteCallable<?> MAP_FACTORY = new IgniteCallable<Map>() {
+ @Override public Map call() {
+ return new HashMap();
+ }
+
+ @Override public String toString() {
+ return "Hash map factory.";
+ }
+ };
+
+ /** */
private static final IgniteCallable<?> CONCURRENT_MAP_FACTORY = new IgniteCallable<ConcurrentMap>() {
@Override public ConcurrentMap call() {
return new ConcurrentHashMap8();
@@ -184,6 +276,42 @@ public class GridFunc {
};
/** */
+ private static final IgniteClosure CACHE_ENTRY_KEY = new IgniteClosure() {
+ @Override public Object apply(Object o) {
+ return ((Cache.Entry)o).getKey();
+ }
+
+ @Override public String toString() {
+ return "Map entry to key transformer closure.";
+ }
+ };
+
+
+ /** */
+ private static final IgniteClosure CACHE_ENTRY_VAL_GET = new IgniteClosure() {
+ @SuppressWarnings({"unchecked"})
+ @Nullable @Override public Object apply(Object o) {
+ return ((Cache.Entry)o).getValue();
+ }
+
+ @Override public String toString() {
+ return "Cache entry to get-value transformer closure.";
+ }
+ };
+
+ /** */
+ private static final IgnitePredicate CACHE_ENTRY_HAS_PEEK_VAL = new IgnitePredicate() {
+ @SuppressWarnings({"unchecked"})
+ @Override public boolean apply(Object o) {
+ return ((Cache.Entry)o).getValue() != null;
+ }
+
+ @Override public String toString() {
+ return "Cache entry has-peek-value predicate.";
+ }
+ };
+
+ /** */
private static final IgniteClosure<ClusterNode, UUID> NODE2ID = new IgniteClosure<ClusterNode, UUID>() {
@Override public UUID apply(ClusterNode n) {
return n.id();
@@ -216,8 +344,12 @@ public class GridFunc {
}
};
- /** Empty iterator. */
- private static final GridEmptyIterator EMPTY_ITER = new GridEmptyIterator();
+ /** */
+ private static final IgnitePredicate<IgniteInternalFuture<?>> UNFINISHED_FUTURE = new IgnitePredicate<IgniteInternalFuture<?>>() {
+ @Override public boolean apply(IgniteInternalFuture<?> f) {
+ return !f.isDone();
+ }
+ };
/**
* Gets predicate that evaluates to {@code true} only for given local node ID.
@@ -250,6 +382,23 @@ public class GridFunc {
}
/**
+ * Creates new collection by removing duplicates from the given collection.
+ *
+ * @param c Collection to remove duplicates from.
+ * @param <T> Type of the collection.
+ * @return De-duped collection.
+ */
+ public static <T> Collection<T> dedup(Collection<? extends T> c) {
+ A.notNull(c, "c");
+
+ Collection<T> set = new GridLeanSet<>();
+
+ set.addAll(c);
+
+ return set;
+ }
+
+ /**
* Calculates sum of all elements.
* <p>
* <img src="{@docRoot}/img/sum.png">
@@ -429,6 +578,8 @@ public class GridFunc {
if (isEmpty(res))
return Collections.emptyList();
+ assert res != null;
+
Collection<T> c = new ArrayList<>(res.size());
for (ComputeJobResult r : res)
@@ -542,6 +693,20 @@ public class GridFunc {
}
/**
+ * Gets random value from given array. This operation
+ * does not iterate through array elements and returns immediately.
+ *
+ * @param c Input array.
+ * @param <T> Type of the array elements.
+ * @return Random value from the input array.
+ */
+ public static <T> T rand(T... c) {
+ A.notNull(c, "c");
+
+ return c[ThreadLocalRandom8.current().nextInt(c.length)];
+ }
+
+ /**
* Concatenates an element to a collection. If {@code copy} flag is {@code true}, then
* a new collection will be created and the element and passed in collection will be
* copied into the new one. The returned collection will be modifiable. If {@code copy}
@@ -564,6 +729,8 @@ public class GridFunc {
return l;
}
+ assert c != null;
+
Collection<T> ret = new ArrayList<>(c.size() + 1);
ret.add(t);
@@ -575,6 +742,8 @@ public class GridFunc {
if (isEmpty(c))
return Collections.singletonList(t);
+ assert c != null;
+
return new GridSerializableCollection<T>() {
@NotNull
@Override public Iterator<T> iterator() {
@@ -631,11 +800,8 @@ public class GridFunc {
if (isEmpty(c1) && isEmpty(c2))
return new ArrayList<>(0);
- if (isEmpty(c1)) {
- assert c2 != null;
-
+ if (isEmpty(c1))
return new ArrayList<>(c2);
- }
if (isEmpty(c2))
return new ArrayList<>(c1);
@@ -708,7 +874,7 @@ public class GridFunc {
* @param obj One or more elements.
* @return Concatenated array.
*/
- public static <T> T[] concat(@Nullable T[] arr, T[] obj) {
+ public static <T> T[] concat(@Nullable T[] arr, T... obj) {
T[] newArr;
if (arr == null || arr.length == 0)
@@ -723,15 +889,17 @@ public class GridFunc {
}
/**
- * Concatenate two iterators.
+ * Concatenates multiple iterators as single one.
*
- * @param iter1 Iterator 1.
- * @param iter2 Iterator 2.
+ * @param iters Iterators.
* @return Single iterator.
*/
@SuppressWarnings("unchecked")
- public static <T> Iterator<T> concat(Iterator<T> iter1, Iterator<T> iter2) {
- return concat(asList(iter1, iter2).iterator());
+ public static <T> Iterator<T> concat(Iterator<T> ... iters) {
+ if (iters.length == 1)
+ return iters[0];
+
+ return concat(asList(iters).iterator());
}
/**
@@ -812,10 +980,11 @@ public class GridFunc {
* @param <T> Type of collections.
* @return Collection of remaining elements
*/
- public static <T0, T extends T0> Collection<T> lose(Collection<T> c, boolean cp, @Nullable Collection<T0> filter) {
+ public static <T0, T extends T0> Collection<T> lose(Collection<T> c, boolean cp,
+ @Nullable Collection<T0> filter) {
A.notNull(c, "c");
- return lose(c, cp, in(filter));
+ return lose(c, cp, F0.in(filter));
}
/**
@@ -825,11 +994,11 @@ public class GridFunc {
* @param c Input collection.
* @param cp If {@code true} method creates new collection without modifying the input one,
* otherwise does <tt>in-place</tt> modifications.
- * @param p Predicate.
+ * @param p Predicates to filter by. If no predicates provided - no elements are lost.
* @param <T> Type of collections.
* @return Collection of remaining elements.
*/
- public static <T> Collection<T> lose(Collection<T> c, boolean cp, IgnitePredicate<? super T> p) {
+ public static <T> Collection<T> lose(Collection<T> c, boolean cp, @Nullable IgnitePredicate<? super T>... p) {
A.notNull(c, "c");
Collection<T> res;
@@ -837,18 +1006,19 @@ public class GridFunc {
if (!cp) {
res = c;
- if (!isAlwaysFalse(p)) {
- for (Iterator<T> iter = res.iterator(); iter.hasNext(); )
- if (p.apply(iter.next()))
+ if (isEmpty(p))
+ res.clear();
+ else if (!isAlwaysFalse(p))
+ for (Iterator<T> iter = res.iterator(); iter.hasNext();)
+ if (isAll(iter.next(), p))
iter.remove();
- }
}
else {
res = new LinkedList<>();
- if (!isAlwaysTrue(p))
+ if (!isEmpty(p) && !isAlwaysTrue(p))
for (T t : c)
- if (!p.apply(t))
+ if (!isAll(t, p))
res.add(t);
}
@@ -856,6 +1026,162 @@ public class GridFunc {
}
/**
+ * Loses all entries in input map that are evaluated to {@code true} by all given predicates.
+ *
+ * @param m Map to filter.
+ * @param cp If {@code true} method creates new map not modifying input, otherwise does
+ * <tt>in-place</tt> modifications.
+ * @param p Optional set of predicates to use for filtration. If none provided - the original map
+ * (or its copy) will be returned.
+ * @param <K> Type of the free variable for the predicate and type of map's keys.
+ * @param <V> Type of the free variable for the predicate and type of map's values.
+ * @return Filtered map.
+ */
+ @SuppressWarnings({"unchecked"})
+ public static <K, V> Map<K, V> lose(Map<K, V> m, boolean cp,
+ @Nullable IgnitePredicate<? super Map.Entry<K, V>>... p) {
+ A.notNull(m, "m");
+
+ Map<K, V> res;
+
+ if (!cp) {
+ res = m;
+
+ if (isEmpty(p))
+ res.clear();
+ else if (!isAlwaysFalse(p))
+ for (Iterator<Map.Entry<K, V>> iter = m.entrySet().iterator(); iter.hasNext();)
+ if (isAll(iter.next(), p))
+ iter.remove();
+ }
+ else {
+ res = U.newHashMap(m.size());
+
+ if (!isEmpty(p) && !isAlwaysTrue(p))
+ for (Map.Entry<K, V> e : m.entrySet())
+ if (!F.isAll(e, p))
+ res.put(e.getKey(), e.getValue());
+ }
+
+ return res;
+ }
+
+ /**
+ * Loses all entries in input map which keys are evaluated to {@code true} by all
+ * given predicates.
+ *
+ * @param m Map to filter.
+ * @param cp If {@code true} method creates new map not modifying input, otherwise does
+ * <tt>in-place</tt> modifications.
+ * @param p Optional set of predicates to use for filtration. If none provided - original
+ * map (or its copy) will be returned.
+ * @param <K> Type of the free variable for the predicate and type of map's keys.
+ * @param <V> Type of map's values.
+ * @return Filtered map.
+ */
+ public static <K, V> Map<K, V> loseKeys(
+ Map<K, V> m,
+ boolean cp,
+ @Nullable final IgnitePredicate<? super K>... p
+ ) {
+ return lose(m, cp, new P1<Map.Entry<K, V>>() {
+ @Override public boolean apply(Map.Entry<K, V> e) {
+ return isAll(e.getKey(), p);
+ }
+ });
+ }
+
+ /**
+ * Loses all entries in input map which values are evaluated to {@code true} by all
+ * given predicates.
+ *
+ * @param m Map to filter.
+ * @param cp If {@code true} method creates new map not modifying input, otherwise does
+ * <tt>in-place</tt> modifications.
+ * @param p Optional set of predicates to use for filtration. If none provided - original
+ * map (or its copy) will be returned.
+ * @param <K> Type of the free variable for the predicate and type of map's keys.
+ * @param <V> Type of map's values.
+ * @return Filtered map.
+ */
+ public static <K, V> Map<K, V> loseValues(Map<K, V> m, boolean cp,
+ @Nullable final IgnitePredicate<? super V>... p) {
+ return lose(m, cp, new P1<Map.Entry<K, V>>() {
+ @Override public boolean apply(Map.Entry<K, V> e) {
+ return isAll(e.getValue(), p);
+ }
+ });
+ }
+
+ /**
+ * Loses all elements in input list that are contained in {@code filter} collection.
+ *
+ * @param c Input list.
+ * @param cp If {@code true} method creates new list not modifying input,
+ * otherwise does <tt>in-place</tt> modifications.
+ * @param filter Filter collection. If {@code filter} collection is empty or
+ * {@code null} - no elements are lost.
+ * @param <T> Type of list.
+ * @return List of remaining elements
+ */
+ public static <T> List<T> loseList(List<T> c, boolean cp, @Nullable Collection<? super T> filter) {
+ A.notNull(c, "c");
+
+ List<T> res;
+
+ if (!cp) {
+ res = c;
+
+ if (filter != null)
+ res.removeAll(filter);
+ }
+ else {
+ res = new LinkedList<>();
+
+ for (T t : c) {
+ if (filter == null || !filter.contains(t))
+ res.add(t);
+ }
+ }
+
+ return res;
+ }
+
+ /**
+ * Loses all elements in input list for which any of the predicates evaluate to {@code true}.
+ *
+ * @param c Input list.
+ * @param cp If {@code true} method creates new list not modifying input,
+ * otherwise does <tt>in-place</tt> modifications.
+ * @param p Predicates to filter by; all elements for which any of the predicates evaluate to {@code true} are lost.
+ * @param <T> Type of list.
+ * @return List of remaining elements
+ */
+ public static <T> List<T> filterList(List<T> c, boolean cp, @Nullable IgnitePredicate<T>... p) {
+ A.notNull(c, "c");
+
+ List<T> res;
+
+ if (!cp) {
+ res = c;
+
+ if (p != null)
+ for (Iterator<T> it = c.iterator(); it.hasNext();)
+ if (isAny(it.next(), p))
+ it.remove();
+ }
+ else {
+ res = new ArrayList<>(c.size());
+
+ for (T t : c)
+ if (!isAny(t, p))
+ res.add(t);
+ }
+
+ return res;
+ }
+
+ /**
* Gets closure which converts node to node ID.
*
* @return Closure which converts node to node ID.
@@ -927,40 +1253,34 @@ public class GridFunc {
}
/**
- * Retains all elements in input collection that are contained in {@code filter}.
+ * Creates predicates that evaluates to {@code true} for each node in given collection.
+ * Note that if collection is empty the result predicate will always evaluate to {@code false}.
+ * Implementation simply creates {@link GridNodePredicate} instance.
*
- * @param c Input collection.
- * @param cp If {@code true} method creates collection not modifying input, otherwise does
- * <tt>in-place</tt> modifications.
- * @param filter Filter collection. If filter collection is {@code null} or empty -
- * an empty collection will be returned.
- * @param <T> Type of collections.
- * @return Collection of retain elements.
+ * @param nodes Collection of nodes. If none provided - result predicate will always
+ * return {@code false}.
+ * @return Predicates that evaluates to {@code true} for each node in given collection.
*/
- public static <T0, T extends T0> Collection<T> retain(Collection<T> c, boolean cp,
- @Nullable Collection<? extends T0> filter) {
- A.notNull(c, "c");
-
- return retain(c, cp, in(filter));
+ public static IgnitePredicate<ClusterNode> nodeForNodes(ClusterNode... nodes) {
+ return new GridNodePredicate(nodes);
}
/**
- * Retains all elements in input collection that are evaluated to {@code true}
- * by the given predicate.
+ * Retains all elements in input collection that are contained in {@code filter}.
*
* @param c Input collection.
* @param cp If {@code true} method creates collection not modifying input, otherwise does
* <tt>in-place</tt> modifications.
- * @param p Predicates to filter by. If no predicates provides - all elements
- * will be retained.
+ * @param filter Filter collection. If filter collection is {@code null} or empty -
+ * an empty collection will be returned.
* @param <T> Type of collections.
* @return Collection of retain elements.
*/
- public static <T> Collection<T> retain(Collection<T> c, boolean cp, IgnitePredicate<? super T> p) {
+ public static <T0, T extends T0> Collection<T> retain(Collection<T> c, boolean cp,
+ @Nullable Collection<? extends T0> filter) {
A.notNull(c, "c");
- A.notNull(p, "p");
- return lose(c, cp, not(p));
+ return retain(c, cp, F0.in(filter));
}
/**
@@ -975,7 +1295,7 @@ public class GridFunc {
* @param <T> Type of collections.
* @return Collection of retain elements.
*/
- public static <T> Collection<T> retain(Collection<T> c, boolean cp, @Nullable IgnitePredicate<? super T>[] p) {
+ public static <T> Collection<T> retain(Collection<T> c, boolean cp, @Nullable IgnitePredicate<? super T>... p) {
A.notNull(c, "c");
return lose(c, cp, not(p));
@@ -1025,19 +1345,36 @@ public class GridFunc {
}
/**
- * Create list containing two elements.
+ * Curries given closure.
*
- * @param t1 First element.
- * @param t2 Second element.
- * @return List.
+ * @param f Closure.
+ * @param e Parameter.
+ * @param <T> Input type.
+ * @param <R> Output type.
+ * @return Curried closure.
*/
- public static <T> List<T> asList(T t1, T t2) {
- ArrayList<T> res = new ArrayList<>(2);
-
- res.add(t1);
- res.add(t2);
+ public static <T, R> IgniteOutClosure<R> curry(final IgniteClosure<? super T, R> f, final T e) {
+ return new IgniteOutClosure<R>() {
+ @Override public R apply() {
+ return f.apply(e);
+ }
+ };
+ }
- return res;
+ /**
+ * Curries given closure.
+ *
+ * @param f Closure.
+ * @param e Parameter.
+ * @param <T> Input type.
+ * @return Curried closure.
+ */
+ public static <T> GridAbsClosure curry(final IgniteInClosure<? super T> f, final T e) {
+ return new GridAbsClosure() {
+ @Override public void apply() {
+ f.apply(e);
+ }
+ };
}
/**
@@ -1053,7 +1390,6 @@ public class GridFunc {
* @param <T> Array type.
* @return {@link List} instance for array.
*/
- @SuppressWarnings("unchecked")
public static <T> List<T> asList(@Nullable T... vals) {
return isEmpty(vals) ? Collections.<T>emptyList() : Arrays.asList(vals);
}
@@ -1064,9 +1400,8 @@ public class GridFunc {
* @param <T> Type of the iterator.
* @return Newly created empty iterator.
*/
- @SuppressWarnings("unchecked")
public static <T> GridIterator<T> emptyIterator() {
- return EMPTY_ITER;
+ return new GridEmptyIterator<>();
}
/**
@@ -1239,29 +1574,6 @@ public class GridFunc {
}
/**
- * Gets size of the given collection.
- *
- * @param c Collection.
- * @return Size.
- */
- public static <T> int size(@Nullable Collection<? extends T> c) {
- return c == null || c.isEmpty() ? 0 : c.size();
- }
-
- /**
- * Gets size of the given collection with provided optional predicates.
- *
- * @param c Collection to size.
- * @param p Optional predicates that filters out elements from count.
- * @param <T> Type of the iterator.
- * @return Number of elements in the collection for which all given predicates
- * evaluates to {@code true}. If no predicates is provided - all elements are counted.
- */
- public static <T> int size(@Nullable Collection<? extends T> c, IgnitePredicate<? super T> p) {
- return c == null || c.isEmpty() ? 0 : isAlwaysTrue(p) ? c.size() : size(c.iterator(), p);
- }
-
- /**
* Gets size of the given collection with provided optional predicates.
*
* @param c Collection to size.
@@ -1270,34 +1582,12 @@ public class GridFunc {
* @return Number of elements in the collection for which all given predicates
* evaluates to {@code true}. If no predicates is provided - all elements are counted.
*/
- public static <T> int size(@Nullable Collection<? extends T> c, @Nullable IgnitePredicate<? super T>[] p) {
+ public static <T> int size(@Nullable Collection<? extends T> c, @Nullable IgnitePredicate<? super T>... p) {
return c == null || c.isEmpty() ? 0 : isEmpty(p) || isAlwaysTrue(p) ? c.size() : size(c.iterator(), p);
}
/**
- * Gets size of the given iterator. Iterator will be traversed to get the count.
- *
- * @param it Iterator to size.
- * @param <T> Type of the iterator.
- * @return Number of elements in the iterator.
- */
- public static <T> int size(@Nullable Iterator<? extends T> it) {
- if (it == null)
- return 0;
-
- int n = 0;
-
- while (it.hasNext()) {
- n++;
-
- it.next();
- }
-
- return n;
- }
-
- /**
- * Gets size of the given iterator with provided optional predicate. Iterator
+ * Gets size of the given iterator with provided optional predicates. Iterator
* will be traversed to get the count.
*
* @param it Iterator to size.
@@ -1306,7 +1596,7 @@ public class GridFunc {
* @return Number of elements in the iterator for which all given predicates
* evaluates to {@code true}. If no predicates is provided - all elements are counted.
*/
- public static <T> int size(@Nullable Iterator<? extends T> it, IgnitePredicate<? super T> p) {
+ public static <T> int size(@Nullable Iterator<? extends T> it, @Nullable IgnitePredicate<? super T>... p) {
if (it == null)
return 0;
@@ -1314,7 +1604,7 @@ public class GridFunc {
if (!isAlwaysFalse(p)) {
while (it.hasNext()) {
- if (p.apply(it.next()))
+ if (isAll(it.next(), p))
n++;
}
}
@@ -1323,52 +1613,34 @@ public class GridFunc {
}
/**
- * Gets size of the given iterator with provided optional predicates. Iterator
- * will be traversed to get the count.
- *
- * @param it Iterator to size.
- * @param p Optional predicates that filters out elements from count.
- * @param <T> Type of the iterator.
- * @return Number of elements in the iterator for which all given predicates
- * evaluates to {@code true}. If no predicates is provided - all elements are counted.
- */
- public static <T> int size(@Nullable Iterator<? extends T> it, @Nullable IgnitePredicate<? super T>[] p) {
- if (it == null)
- return 0;
-
- int n = 0;
-
- if (!isAlwaysFalse(p)) {
- while (it.hasNext()) {
- if (isAll(it.next(), p))
- n++;
- }
- }
-
- return n;
- }
-
- /**
- * Creates write-through light-weight view on given collection.
+ * Creates write-through light-weight view on given collection with provided predicates. Resulting
+ * collection will only "have" elements for which all provided predicates, if any, evaluate
+ * to {@code true}. Note that only wrapping collection will be created and no duplication of
+ * data will occur. Also note that if array of given predicates is not empty then method
+ * {@code size()} uses full iteration through the collection.
*
* @param c Input collection that serves as a base for the view.
* @param p Optional predicates. If predicates are not provided - all elements will be in the view.
* @param <T> Type of the collection.
* @return Light-weight view on given collection with provided predicate.
*/
- public static <T> Collection<T> view(@Nullable final Collection<T> c, final IgnitePredicate<? super T> p) {
+ @SafeVarargs
+ public static <T> Collection<T> view(@Nullable final Collection<T> c,
+ @Nullable final IgnitePredicate<? super T>... p) {
if (isEmpty(c) || isAlwaysFalse(p))
return Collections.emptyList();
- return isAlwaysTrue(p) ? c : new GridSerializableCollection<T>() {
+ assert c != null;
+
+ return isEmpty(p) || isAlwaysTrue(p) ? c : new GridSerializableCollection<T>() {
// Pass through (will fail for readonly).
@Override public boolean add(T e) {
- return p.apply(e) && c.add(e);
+ return isAll(e, p) && c.add(e);
}
@NotNull
@Override public Iterator<T> iterator() {
- return identityIterator(c, p);
+ return F.iterator0(c, false, p);
}
@Override public int size() {
@@ -1376,82 +1648,117 @@ public class GridFunc {
}
@Override public boolean isEmpty() {
- return !iterator().hasNext();
+ return F.isEmpty(p) ? c.isEmpty() : !iterator().hasNext();
}
};
}
/**
- * Creates read-only light-weight view on given collection with transformation.
+ * Creates read-only light-weight view on given collection with transformation and provided
+ * predicates. Resulting collection will only "have" {@code transformed} elements for which
+ * all provided predicate, if any, evaluates to {@code true}. Note that only wrapping
+ * collection will be created and no duplication of data will occur. Also note that if array
+ * of given predicates is not empty then method {@code size()} uses full iteration through
+ * the collection.
*
* @param c Input collection that serves as a base for the view.
* @param trans Transformation closure.
+ * @param p Optional predicates. If predicates are not provided - all elements will be in the view.
* @param <T1> Type of the collection.
* @return Light-weight view on given collection with provided predicate.
*/
@SuppressWarnings("RedundantTypeArguments")
+ @SafeVarargs
public static <T1, T2> Collection<T2> viewReadOnly(@Nullable final Collection<? extends T1> c,
- final IgniteClosure<? super T1, T2> trans) {
+ final IgniteClosure<? super T1, T2> trans, @Nullable final IgnitePredicate<? super T1>... p) {
A.notNull(trans, "trans");
- if (isEmpty(c))
+ if (isEmpty(c) || isAlwaysFalse(p))
return Collections.emptyList();
+ assert c != null;
+
return new GridSerializableCollection<T2>() {
@NotNull
@Override public Iterator<T2> iterator() {
- return F.<T1, T2>iteratorReadOnly(c, trans);
+ return F.<T1, T2>iterator(c, trans, true, p);
}
@Override public int size() {
- return c.size();
+ return F.isEmpty(p) ? c.size() : F.size(iterator());
}
@Override public boolean isEmpty() {
- return c.isEmpty();
+ return F.isEmpty(p) ? c.isEmpty() : !iterator().hasNext();
}
};
}
/**
- * Creates read-only light-weight view on given collection with transformation and provided
- * predicates. Resulting collection will only "have" {@code transformed} elements for which
- * all provided predicate, if any, evaluates to {@code true}. Note that only wrapping
- * collection will be created and no duplication of data will occur. Also note that if array
- * of given predicates is not empty then method {@code size()} uses full iteration through
- * the collection.
+ * Creates read-only light-weight view on given list with provided transformation.
+ * Resulting list will only "have" {@code transformed} elements. Note that only wrapping
+ * list will be created and no duplication of data will occur.
*
- * @param c Input collection that serves as a base for the view.
+ * @param c Input list that serves as a base for the view.
* @param trans Transformation closure.
- * @param p Optional predicated. If predicates are not provided - all elements will be in the view.
- * @param <T1> Type of the collection.
- * @return Light-weight view on given collection with provided predicate.
+ * @param <T1> Type of the list.
+ * @return Light-weight view on given list with provided transformation.
*/
@SuppressWarnings("RedundantTypeArguments")
- public static <T1, T2> Collection<T2> viewReadOnly(@Nullable final Collection<? extends T1> c,
- final IgniteClosure<? super T1, T2> trans, @Nullable final IgnitePredicate<? super T1> p) {
+ public static <T1, T2> List<T2> viewListReadOnly(@Nullable final List<? extends T1> c,
+ final IgniteClosure<? super T1, T2> trans) {
A.notNull(trans, "trans");
- if (isEmpty(c) || isAlwaysFalse(p))
+ if (isEmpty(c))
return Collections.emptyList();
- return new GridSerializableCollection<T2>() {
+ assert c != null;
+
+ return new GridSerializableList<T2>() {
+ /** */
+ private static final long serialVersionUID = 3126625219739967068L;
+
+ @Override public T2 get(int idx) {
+ return trans.apply(c.get(idx));
+ }
+
@NotNull
@Override public Iterator<T2> iterator() {
- return F.<T1, T2>iterator(c, trans, true, p);
+ return F.<T1, T2>iterator(c, trans, true);
}
@Override public int size() {
- return F.size(iterator());
+ return c.size();
}
@Override public boolean isEmpty() {
- return !iterator().hasNext();
+ return c.isEmpty();
}
};
}
/**
+ * Creates a view on given list with provided transformer and predicates.
+ * Resulting list will only "have" elements for which all provided predicates, if any,
+ * evaluate to {@code true}. Note that a new collection will be created and data will
+ * be copied.
+ *
+ * @param c Input list that serves as a base for the view.
+ * @param trans Transforming closure from T1 to T2.
+ * @param p Optional predicates. If predicates are not provided - all elements will be in the view.
+ * @return View on given list with provided predicate.
+ */
+ public static <T1, T2> List<T2> transformList(Collection<? extends T1> c,
+ IgniteClosure<? super T1, T2> trans, @Nullable IgnitePredicate<? super T1>... p) {
+ A.notNull(c, "c", trans, "trans");
+
+ if (isAlwaysFalse(p))
+ return Collections.emptyList();
+
+ return new ArrayList<>(transform(retain(c, true, p), trans));
+ }
+
+ /**
* Creates light-weight view on given map with provided predicates. Resulting map will
* only "have" keys for which all provided predicates, if any, evaluates to {@code true}.
* Note that only wrapping map will be created and no duplication of data will occur.
@@ -1459,24 +1766,26 @@ public class GridFunc {
* uses full iteration through the entry set.
*
* @param m Input map that serves as a base for the view.
- * @param p Predicate.
+ * @param p Optional predicates. If predicates are not provided - all will be in the view.
* @param <K> Type of the key.
* @param <V> Type of the value.
* @return Light-weight view on given map with provided predicate.
*/
public static <K0, K extends K0, V0, V extends V0> Map<K, V> view(@Nullable final Map<K, V> m,
- final IgnitePredicate<? super K> p) {
+ @Nullable final IgnitePredicate<? super K>... p) {
if (isEmpty(m) || isAlwaysFalse(p))
return Collections.emptyMap();
- return isAlwaysTrue(p) ? m : new GridSerializableMap<K, V>() {
+ assert m != null;
+
+ return isEmpty(p) || isAlwaysTrue(p) ? m : new GridSerializableMap<K, V>() {
/** */
private static final long serialVersionUID = 5531745605372387948L;
/** Entry predicate. */
private IgnitePredicate<Entry<K, V>> ep = new P1<Map.Entry<K, V>>() {
@Override public boolean apply(Entry<K, V> e) {
- return p.apply(e.getKey());
+ return isAll(e.getKey(), p);
}
};
@@ -1485,7 +1794,7 @@ public class GridFunc {
return new GridSerializableSet<Map.Entry<K, V>>() {
@NotNull
@Override public Iterator<Entry<K, V>> iterator() {
- return identityIterator(m.entrySet(), ep);
+ return iterator0(m.entrySet(), false, ep);
}
@Override public int size() {
@@ -1494,12 +1803,12 @@ public class GridFunc {
@SuppressWarnings({"unchecked"})
@Override public boolean remove(Object o) {
- return ep.apply((Map.Entry<K, V>)o) && m.entrySet().remove(o);
+ return F.isAll((Map.Entry<K, V>)o, ep) && m.entrySet().remove(o);
}
@SuppressWarnings({"unchecked"})
@Override public boolean contains(Object o) {
- return ep.apply((Map.Entry<K, V>)o) && m.entrySet().contains(o);
+ return F.isAll((Map.Entry<K, V>)o, ep) && m.entrySet().contains(o);
}
@Override public boolean isEmpty() {
@@ -1514,13 +1823,13 @@ public class GridFunc {
@SuppressWarnings({"unchecked"})
@Nullable @Override public V get(Object key) {
- return p.apply((K)key) ? m.get(key) : null;
+ return isAll((K)key, p) ? m.get(key) : null;
}
@Nullable @Override public V put(K key, V val) {
V oldVal = get(key);
- if (p.apply(key))
+ if (isAll(key, p))
m.put(key, val);
return oldVal;
@@ -1528,7 +1837,7 @@ public class GridFunc {
@SuppressWarnings({"unchecked"})
@Override public boolean containsKey(Object key) {
- return p.apply((K)key) && m.containsKey(key);
+ return isAll((K)key, p) && m.containsKey(key);
}
};
}
@@ -1542,26 +1851,38 @@ public class GridFunc {
*
* @param m Input map that serves as a base for the view.
* @param trans Transformer for map value transformation.
+ * @param p Optional predicates. If predicates are not provided - all will be in the view.
* @param <K> Type of the key.
* @param <V> Type of the input map value.
* @param <V1> Type of the output map value.
* @return Light-weight view on given map with provided predicate and transformer.
*/
public static <K0, K extends K0, V0, V extends V0, V1> Map<K, V1> viewReadOnly(@Nullable final Map<K, V> m,
- final IgniteClosure<V, V1> trans) {
+ final IgniteClosure<V, V1> trans, @Nullable final IgnitePredicate<? super K>... p) {
A.notNull(trans, "trans");
- if (isEmpty(m))
+ if (isEmpty(m) || isAlwaysFalse(p))
return Collections.emptyMap();
+ assert m != null;
+
+ final boolean hasPred = p != null && p.length > 0;
+
return new GridSerializableMap<K, V1>() {
+ /** Entry predicate. */
+ private IgnitePredicate<Entry<K, V>> ep = new P1<Map.Entry<K, V>>() {
+ @Override public boolean apply(Entry<K, V> e) {
+ return isAll(e.getKey(), p);
+ }
+ };
+
@NotNull
@Override public Set<Entry<K, V1>> entrySet() {
return new GridSerializableSet<Map.Entry<K, V1>>() {
@NotNull
@Override public Iterator<Entry<K, V1>> iterator() {
return new Iterator<Entry<K, V1>>() {
- private Iterator<Entry<K, V>> it = identityIteratorReadOnly(m.entrySet());
+ private Iterator<Entry<K, V>> it = iterator0(m.entrySet(), true, ep);
@Override public boolean hasNext() {
return it.hasNext();
@@ -1592,7 +1913,7 @@ public class GridFunc {
}
@Override public int size() {
- return m.size();
+ return hasPred ? F.size(m.keySet(), p) : m.size();
}
@SuppressWarnings({"unchecked"})
@@ -1602,25 +1923,27 @@ public class GridFunc {
@SuppressWarnings({"unchecked"})
@Override public boolean contains(Object o) {
- return m.entrySet().contains(o);
+ return F.isAll((Map.Entry<K, V>)o, ep) && m.entrySet().contains(o);
}
@Override public boolean isEmpty() {
- return m.isEmpty();
+ return hasPred ? !iterator().hasNext() : m.isEmpty();
}
};
}
@Override public boolean isEmpty() {
- return m.isEmpty();
+ return hasPred ? entrySet().isEmpty() : m.isEmpty();
}
@SuppressWarnings({"unchecked"})
@Nullable @Override public V1 get(Object key) {
- V v = m.get(key);
+ if (isAll((K)key, p)) {
+ V v = m.get(key);
- if (v != null)
- return trans.apply(v);
+ if (v != null)
+ return trans.apply(v);
+ }
return null;
}
@@ -1635,34 +1958,170 @@ public class GridFunc {
@SuppressWarnings({"unchecked"})
@Override public boolean containsKey(Object key) {
- return m.containsKey(key);
+ return isAll((K)key, p) && m.containsKey(key);
}
};
}
/**
- * Read-only map view of a collection.
+ * Read-only view on map that supports transformation of values and key filtering. Resulting map will
+ * only "have" keys for which all provided predicates, if any, evaluates to {@code true}.
+ * Note that only wrapping map will be created and no duplication of data will occur.
+ * Also note that if array of given predicates is not empty then method {@code size()}
+ * uses full iteration through the entry set.
+ *
+ * @param m Input map that serves as a base for the view.
+ * @param trans Transformer for map value transformation.
+ * @param p Optional predicates. If predicates are not provided - all will be in the view.
+ * @param <K> Type of the key.
+ * @param <V> Type of the input map value.
+ * @param <V1> Type of the output map value.
+ * @return Light-weight view on given map with provided predicate and transformer.
+ */
+ public static <K0, K extends K0, V0, V extends V0, V1> Map<K, V1> viewReadOnly(@Nullable final Map<K, V> m,
+ final IgniteBiClosure<K, V, V1> trans, @Nullable final IgnitePredicate<? super K>... p) {
+ A.notNull(trans, "trans");
+
+ if (isEmpty(m) || isAlwaysFalse(p))
+ return Collections.emptyMap();
+
+ assert m != null;
+
+ return new GridSerializableMap<K, V1>() {
+ /** Entry predicate. */
+ private IgnitePredicate<Entry<K, V>> ep = new P1<Map.Entry<K, V>>() {
+ @Override public boolean apply(Entry<K, V> e) {
+ return isAll(e.getKey(), p);
+ }
+ };
+
+ @NotNull
+ @Override public Set<Entry<K, V1>> entrySet() {
+ return new GridSerializableSet<Map.Entry<K, V1>>() {
+ @NotNull
+ @Override public Iterator<Entry<K, V1>> iterator() {
+ return new Iterator<Entry<K, V1>>() {
+ private Iterator<Entry<K, V>> it = iterator0(m.entrySet(), true, ep);
+
+ @Override public boolean hasNext() {
+ return it.hasNext();
+ }
+
+ @Override public Entry<K, V1> next() {
+ final Entry<K, V> e = it.next();
+
+ return new Entry<K, V1>() {
+ @Override public K getKey() {
+ return e.getKey();
+ }
+
+ @Override public V1 getValue() {
+ return trans.apply(e.getKey(), e.getValue());
+ }
+
+ @Override public V1 setValue(V1 val) {
+ throw new UnsupportedOperationException(
+ "Put is not supported for readonly map view.");
+ }
+ };
+ }
+
+ @Override public void remove() {
+ throw new UnsupportedOperationException("Remove is not support for readonly map view.");
+ }
+ };
+ }
+
+ @Override public int size() {
+ return F.size(m.keySet(), p);
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override public boolean remove(Object o) {
+ throw new UnsupportedOperationException("Remove is not support for readonly map view.");
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override public boolean contains(Object o) {
+ return F.isAll((Map.Entry<K, V>)o, ep) && m.entrySet().contains(o);
+ }
+
+ @Override public boolean isEmpty() {
+ return !iterator().hasNext();
+ }
+ };
+ }
+
+ @Override public boolean isEmpty() {
+ return entrySet().isEmpty();
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Nullable @Override public V1 get(Object key) {
+ if (isAll((K)key, p)) {
+ V v = m.get(key);
+
+ if (v != null)
+ return trans.apply((K)key, v);
+ }
+
+ return null;
+ }
+
+ @Nullable @Override public V1 put(K key, V1 val) {
+ throw new UnsupportedOperationException("Put is not supported for readonly map view.");
+ }
+
+ @Override public V1 remove(Object key) {
+ throw new UnsupportedOperationException("Remove is not supported for readonly map view.");
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override public boolean containsKey(Object key) {
+ return isAll((K)key, p) && m.containsKey(key);
+ }
+ };
+ }
+
+ /**
+ * Read-only map view of a collection. Resulting map is a lightweight view of an input collection,
+ * with filtered elements of an input collection as keys, and closure execution results
+ * as values. The map will only contain keys for which all provided predicates, if any, evaluate
+ * to {@code true}. Note that only wrapping map will be created and no duplication of data will occur.
+ * Also note that if array of given predicates is not empty then method {@code size()}
+ * uses full iteration through the entry set.
*
* @param c Input collection.
* @param mapClo Mapping closure, that maps key to value.
+ * @param p Optional predicates to filter input collection. If predicates are not provided - all
+ * elements will be in the view.
* @param <K> Key type.
* @param <V> Value type.
* @return Light-weight view on given map with provided predicates and mapping.
*/
@SuppressWarnings("TypeMayBeWeakened")
public static <K0, K extends K0, V0, V extends V0> Map<K, V> viewAsMap(@Nullable final Set<K> c,
- final IgniteClosure<? super K, V> mapClo) {
+ final IgniteClosure<? super K, V> mapClo, @Nullable final IgnitePredicate<? super K>... p) {
A.notNull(mapClo, "trans");
- if (isEmpty(c))
+ if (isEmpty(c) || isAlwaysFalse(p))
return Collections.emptyMap();
+ assert c != null;
+
return new GridSerializableMap<K, V>() {
+ /** Entry predicate. */
+ private IgnitePredicate<K> ep = new P1<K>() {
+ @Override public boolean apply(K e) {
+ return isAll(e, p);
+ }
+ };
+
@NotNull @Override public Set<Entry<K, V>> entrySet() {
return new GridSerializableSet<Entry<K, V>>() {
@NotNull @Override public Iterator<Entry<K, V>> iterator() {
return new Iterator<Entry<K, V>>() {
- private Iterator<K> it = identityIteratorReadOnly(c);
+ private Iterator<K> it = iterator0(c, true, ep);
@Override public boolean hasNext() {
return it.hasNext();
@@ -1695,7 +2154,7 @@ public class GridFunc {
}
@Override public int size() {
- return c.size();
+ return F.size(c, p);
}
@Override public boolean remove(Object o) {
@@ -1703,16 +2162,15 @@ public class GridFunc {
}
@Override public boolean isEmpty() {
- return c.isEmpty();
+ return !iterator().hasNext();
}
};
}
@Override public boolean isEmpty() {
- return c.isEmpty();
+ return entrySet().isEmpty();
}
- @SuppressWarnings("unchecked")
@Nullable @Override public V get(Object key) {
if (containsKey(key))
return mapClo.apply((K)key);
@@ -1728,9 +2186,8 @@ public class GridFunc {
throw new UnsupportedOperationException("Remove is not supported for readonly collection view.");
}
- @SuppressWarnings("SuspiciousMethodCalls")
@Override public boolean containsKey(Object key) {
- return c.contains(key);
+ return isAll((K)key, p) && c.contains(key);
}
};
}
@@ -1833,6 +2290,40 @@ public class GridFunc {
}
/**
+ * Utility map getter. This method is analogous to {@link #addIfAbsent(Map, Object, Callable)}
+ * method but this one doesn't put the default value into the map when key is not found.
+ *
+ * @param map Map to get value from.
+ * @param key Map key (can be {@code null}).
+ * @param c Optional factory closure for the default value to be returned
+ * when {@code key} is not found. If closure is not provided - {@code null} will be returned.
+ * @param <K> Map key type.
+ * @param <V> Map value type.
+ * @return Value for the {@code key} or default value produced by {@code c} if key is not
+ * found (or {@code null} if key is not found and closure is not provided).
+ * @throws GridClosureException Thrown in case when callable throws exception.
+ * @see #newLinkedList()
+ * @see #newList()
+ * @see #newSet()
+ * @see #newMap()
+ * @see #newAtomicLong()
+ * @see #newAtomicInt()
+ * @see #newAtomicRef()
+ * @see #newAtomicBoolean()
+ */
+ @Nullable public static <K, V> V returnIfAbsent(Map<? extends K, ? extends V> map, @Nullable K key,
+ @Nullable Callable<V> c) {
+ A.notNull(map, "map");
+
+ try {
+ return !map.containsKey(key) ? c == null ? null : c.call() : map.get(key);
+ }
+ catch (Exception e) {
+ throw wrap(e);
+ }
+ }
+
+ /**
* Returns a factory closure that creates new {@link ConcurrentLinkedDeque8} instance.
* Note that this method does not create a new closure but returns a static one.
*
@@ -1846,6 +2337,82 @@ public class GridFunc {
}
/**
+ * Returns a factory closure that creates new {@link List} instance. Note that this
+ * method does not create a new closure but returns a static one.
+ *
+ * @param <T> Type parameters for the created {@link List}.
+ * @return Factory closure that creates new {@link List} instance every
+ * time its {@link org.apache.ignite.lang.IgniteOutClosure#apply()} method is called.
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> IgniteCallable<List<T>> newList() {
+ return (IgniteCallable<List<T>>)LIST_FACTORY;
+ }
+
+ /**
+ * Returns a factory closure that creates new {@link AtomicInteger} instance
+ * initialized to {@code zero}. Note that this method does not create a new
+ * closure but returns a static one.
+ *
+ * @return Factory closure that creates new {@link AtomicInteger} instance
+ * initialized to {@code zero} every time its {@link org.apache.ignite.lang.IgniteOutClosure#apply()} method is called.
+ */
+ public static IgniteCallable<AtomicInteger> newAtomicInt() {
+ return ATOMIC_INT_FACTORY;
+ }
+
+ /**
+ * Returns a factory closure that creates new {@link AtomicLong} instance
+ * initialized to {@code zero}. Note that this method does not create a new
+ * closure but returns a static one.
+ *
+ * @return Factory closure that creates new {@link AtomicLong} instance
+ * initialized to {@code zero} every time its {@link org.apache.ignite.lang.IgniteOutClosure#apply()} method is called.
+ */
+ public static IgniteCallable<AtomicLong> newAtomicLong() {
+ return ATOMIC_LONG_FACTORY;
+ }
+
+ /**
+ * Returns a factory closure that creates new {@link AtomicReference} instance
+ * initialized to {@code null}. Note that this method does not create a new closure
+ * but returns a static one.
+ *
+ * @param <T> Type of the atomic reference.
+ * @return Factory closure that creates new {@link AtomicReference} instance
+ * initialized to {@code null} every time its {@link org.apache.ignite.lang.IgniteOutClosure#apply()} method is called.
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> IgniteCallable<AtomicReference<T>> newAtomicRef() {
+ return (IgniteCallable<AtomicReference<T>>)ATOMIC_REF_FACTORY;
+ }
+
+ /**
+ * Returns a factory closure that creates new {@link AtomicBoolean} instance
+ * initialized to {@code false}. Note that this method does not create a new
+ * closure but returns a static one.
+ *
+ * @return Factory closure that creates new {@link AtomicBoolean} instance
+ * initialized to {@code false} every time its {@link org.apache.ignite.lang.IgniteOutClosure#apply()} method is called.
+ */
+ public static IgniteCallable<AtomicBoolean> newAtomicBoolean() {
+ return ATOMIC_BOOL_FACTORY;
+ }
+
+ /**
+ * Returns a factory closure that creates new {@link LinkedList} instance.
+ * Note that this method does not create a new closure but returns a static one.
+ *
+ * @param <T> Type parameters for the created {@link LinkedList}.
+ * @return Factory closure that creates new {@link LinkedList} instance every time its {@link
+ * org.apache.ignite.lang.IgniteOutClosure#apply()} method is called.
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> IgniteCallable<LinkedList<T>> newLinkedList() {
+ return (IgniteCallable<LinkedList<T>>)LINKED_LIST_FACTORY;
+ }
+
+ /**
* Returns a factory closure that creates new {@link Set} instance. Note that this
* method does not create a new closure but returns a static one.
*
@@ -1859,6 +2426,20 @@ public class GridFunc {
}
/**
+ * Returns a factory closure that creates new {@link Map} instance. Note
+ * that this method does not create a new closure but returns a static one.
+ *
+ * @param <K> Type of the key for the created {@link Map}.
+ * @param <V> Type of the value for the created {@link Map}.
+ * @return Factory closure that creates new {@link Map} instance every
+ * time its {@link org.apache.ignite.lang.IgniteOutClosure#apply()} method is called.
+ */
+ @SuppressWarnings("unchecked")
+ public static <K, V> IgniteCallable<Map<K, V>> newMap() {
+ return (IgniteCallable<Map<K, V>>)MAP_FACTORY;
+ }
+
+ /**
* Returns a factory closure that creates new {@link ConcurrentMap} instance.
* Note that this method does not create a new closure but returns a static one.
*
@@ -1885,18 +2466,6 @@ public class GridFunc {
}
/**
- * Creates and returns iterator from given collection.
- *
- * @param c Input collection.
- * @param <T> Type of the collection elements.
- * @return Iterator from given collection and optional filtering predicate.
- */
- @SuppressWarnings({"unchecked"})
- public static <T> GridIterator<T> identityIteratorReadOnly(Iterable<? extends T> c) {
- return iteratorReadOnly(c, IDENTITY);
- }
-
- /**
* Creates and returns iterator from given collection and optional filtering predicates.
* Returned iterator will only have elements for which all given predicates evaluates to
* {@code true} (if provided). Note that this method will not create new collection but
@@ -1904,29 +2473,16 @@ public class GridFunc {
* evaluate to {@code true} for.
*
* @param c Input collection.
- * @param p Optional filtering predicate.
+ * @param readOnly If {@code true}, then resulting iterator will not allow modifications
+ * to the underlying collection.
+ * @param p Optional filtering predicates.
* @param <T> Type of the collection elements.
* @return Iterator from given collection and optional filtering predicate.
*/
@SuppressWarnings({"unchecked"})
- public static <T> GridIterator<T> identityIterator(Iterable<? extends T> c, IgnitePredicate<? super T> p) {
- return iterator(c, IDENTITY, false, p);
- }
-
- /**
- * Creates and returns transforming iterator from given collection.
- *
- * @param c Input collection.
- * @param trans Transforming closure to convert from T1 to T2.
- * @param <T1> Type of the collection elements.
- * @param <T2> Type of returned elements.
- * @return Iterator from given collection and optional filtering predicate.
- */
- public static <T1, T2> GridIterator<T2> iteratorReadOnly(final Iterable<? extends T1> c,
- final IgniteClosure<? super T1, T2> trans) {
- A.notNull(c, "c", trans, "trans");
-
- return iterator(c.iterator(), trans, true);
+ public static <T> GridIterator<T> iterator0(Iterable<? extends T> c, boolean readOnly,
+ IgnitePredicate<? super T>... p) {
+ return F.iterator(c, IDENTITY, readOnly, p);
}
/**
@@ -1940,14 +2496,15 @@ public class GridFunc {
* @param trans Transforming closure to convert from T1 to T2.
* @param readOnly If {@code true}, then resulting iterator will not allow modifications
* to the underlying collection.
- * @param p Filtering predicate.
+ * @param p Optional filtering predicates.
* @param <T1> Type of the collection elements.
* @param <T2> Type of returned elements.
* @return Iterator from given collection and optional filtering predicate.
*/
public static <T1, T2> GridIterator<T2> iterator(final Iterable<? extends T1> c,
- final IgniteClosure<? super T1, T2> trans, final boolean readOnly, final IgnitePredicate<? super T1> p) {
- A.notNull(c, "c", trans, "trans", p, "p");
+ final IgniteClosure<? super T1, T2> trans, final boolean readOnly,
+ @Nullable final IgnitePredicate<? super T1>... p) {
+ A.notNull(c, "c", trans, "trans");
if (isAlwaysFalse(p))
return F.emptyIterator();
@@ -1966,36 +2523,53 @@ public class GridFunc {
private Iterator<? extends T1> iter = c.iterator();
@Override public boolean hasNextX() {
- if (!moved)
- return more;
+ if (isEmpty(p))
+ return iter.hasNext();
else {
- more = false;
+ if (!moved)
+ return more;
+ else {
+ more = false;
+
+ while (iter.hasNext()) {
+ elem = iter.next();
- while (iter.hasNext()) {
- elem = iter.next();
+ boolean isAll = true;
- if (p.apply(elem)) {
- more = true;
- moved = false;
+ for (IgnitePredicate<? super T1> r : p)
+ if (r != null && !r.apply(elem)) {
+ isAll = false;
- return true;
+ break;
+ }
+
+ if (isAll) {
+ more = true;
+ moved = false;
+
+ return true;
+ }
}
- }
- elem = null; // Give to GC.
+ elem = null; // Give to GC.
- return false;
+ return false;
+ }
}
}
@Nullable @Override public T2 nextX() {
- if (hasNext()) {
- moved = true;
+ if (isEmpty(p))
+ return trans.apply(iter.next());
+ else {
+ if (hasNext()) {
+ moved = true;
- return trans.apply(elem);
+ return trans.apply(elem);
+ }
+ else
+ throw new NoSuchElementException();
}
- else
- throw new NoSuchElementException();
}
@Override public void removeX() {
@@ -2012,22 +2586,80 @@ public class GridFunc {
* @param trans Transforming closure to convert from T1 to T2.
* @param readOnly If {@code true}, then resulting iterator will not allow modifications
* to the underlying collection.
+ * @param p Optional filtering predicates.
* @return Iterator from given iterator and optional filtering predicate.
*/
- public static <T1, T2> GridIterator<T2> iterator(final Iterator<? extends T1> c,
- final IgniteClosure<? super T1, T2> trans, final boolean readOnly) {
+ public static <T1, T2> Iterator<T2> iterator(final Iterator<? extends T1> c,
+ final IgniteClosure<? super T1, T2> trans,
+ final boolean readOnly,
+ @Nullable final IgnitePredicate<? super T1>... p)
+ {
A.notNull(c, "c", trans, "trans");
+ if (isAlwaysFalse(p))
+ return F.emptyIterator();
+
return new GridIteratorAdapter<T2>() {
/** */
+ private T1 elem;
+
+ /** */
+ private boolean moved = true;
+
+ /** */
+ private boolean more;
+
+ /** */
private Iterator<? extends T1> iter = c;
@Override public boolean hasNextX() {
- return iter.hasNext();
+ if (isEmpty(p))
+ return iter.hasNext();
+ else {
+ if (!moved)
+ return more;
+ else {
+ more = false;
+
+ while (iter.hasNext()) {
+ elem = iter.next();
+
+ boolean isAll = true;
+
+ for (IgnitePredicate<? super T1> r : p)
+ if (r != null && !r.apply(elem)) {
+ isAll = false;
+
+ break;
+ }
+
+ if (isAll) {
+ more = true;
+ moved = false;
+
+ return true;
+ }
+ }
+
+ elem = null; // Give to GC.
+
+ return false;
+ }
+ }
}
@Nullable @Override public T2 nextX() {
- return trans.apply(iter.next());
+ if (isEmpty(p))
+ return trans.apply(iter.next());
+ else {
+ if (hasNext()) {
+ moved = true;
+
+ return trans.apply(elem);
+ }
+ else
+ throw new NoSuchElementException();
+ }
}
@Override public void removeX() {
@@ -2108,35 +2740,23 @@ public class GridFunc {
}
/**
- * Gets predicate that evaluates to {@code true} if its free variable is not {@code null}.
+ * Gets predicate that evaluates to {@code true} if its free variable is {@code null}.
*
* @param <T> Type of the free variable, i.e. the element the predicate is called on.
- * @return Predicate that evaluates to {@code true} if its free variable is not {@code null}.
+ * @return Predicate that evaluates to {@code true} if its free variable is {@code null}.
*/
- @SuppressWarnings("unchecked")
- public static <T> IgnitePredicate<T> notNull() {
- return (IgnitePredicate<T>)IS_NOT_NULL;
+ public static <T> IgnitePredicate<T> isNull() {
+ return (IgnitePredicate<T>) IS_NULL;
}
/**
- * Negates given predicate.
- * <p>
- * Gets predicate that evaluates to {@code true} if any of given predicates
- * evaluates to {@code false}. If all predicates evaluate to {@code true} the
- * result predicate will evaluate to {@code false}.
+ * Gets predicate that evaluates to {@code true} if its free variable is not {@code null}.
*
- * @param p Predicate to negate.
* @param <T> Type of the free variable, i.e. the element the predicate is called on.
- * @return Negated predicate.
+ * @return Predicate that evaluates to {@code true} if its free variable is not {@code null}.
*/
- public static <T> IgnitePredicate<T> not(final IgnitePredicate<? super T> p) {
- A.notNull(p, "p");
-
- return isAlwaysFalse(p) ? F.<T>alwaysTrue() : isAlwaysTrue(p) ? F.<T>alwaysFalse() : new P1<T>() {
- @Override public boolean apply(T t) {
- return !p.apply(t);
- }
- };
+ public static <T> IgnitePredicate<T> notNull() {
+ return (IgnitePredicate<T>) IS_NOT_NULL;
}
/**
@@ -2150,7 +2770,8 @@ public class GridFunc {
* @param <T> Type of the free variable, i.e. the element the predicate is called on.
* @return Negated predicate.
*/
- public static <T> IgnitePredicate<T> not(@Nullable final IgnitePredicate<? super T>[] p) {
+ @SafeVarargs
+ public static <T> IgnitePredicate<T> not(@Nullable final IgnitePredicate<? super T>... p) {
return isAlwaysFalse(p) ? F.<T>alwaysTrue() : isAlwaysTrue(p) ? F.<T>alwaysFalse() : new P1<T>() {
@Override public boolean apply(T t) {
return !isAll(t, p);
@@ -2193,13 +2814,30 @@ public class GridFunc {
}
/**
+ * Gets predicate that evaluates to {@code true} if its free variable is instance of the given class.
+ *
+ * @param cls Class to compare to.
+ * @param <T> Type of the free variable, i.e. the element the predicate is called on.
+ * @return Predicate that evaluates to {@code true} if its free variable is instance
+ * of the given class.
+ */
+ public static <T> IgnitePredicate<T> instanceOf(final Class<?> cls) {
+ A.notNull(cls, "cls");
+
+ return new P1<T>() {
+ @Override public boolean apply(T t) {
+ return t != null && cls.isAssignableFrom(t.getClass());
+ }
+ };
+ }
+
+ /**
* Gets first element from given collection or returns {@code null} if the collection is empty.
*
* @param c A collection.
* @param <T> Type of the collection.
* @return Collections' first element or {@code null} in case if the collection is empty.
*/
- @SuppressWarnings("unchecked")
public static <T> T first(@Nullable Iterable<? extends T> c) {
if (c == null)
return null;
@@ -2232,11 +2870,12 @@ public class GridFunc {
* @param <T> Type of the collection.
* @return Collections' first element or {@code null} in case if the collection is empty.
*/
- @SuppressWarnings("unchecked")
@Nullable public static <T> T last(@Nullable Iterable<? extends T> c) {
if (c == null)
return null;
+ assert c != null;
+
if (c instanceof RandomAccess && c instanceof List) {
List<T> l = (List<T>)c;
@@ -2321,6 +2960,8 @@ public class GridFunc {
return F.alwaysTrue();
if (F0.isAllNodePredicates(ps)) {
+ assert ps != null;
+
Set<UUID> ids = new HashSet<>();
for (IgnitePredicate<? super T> p : ps) {
@@ -2340,6 +2981,8 @@ public class GridFunc {
else {
return new P1<T>() {
@Override public boolean apply(T t) {
+ assert ps != null;
+
for (IgnitePredicate<? super T> p : ps)
if (p != null && !p.apply(t))
return false;
@@ -2376,24 +3019,6 @@ public class GridFunc {
}
/**
- * Gets predicate (not peer-deployable) that returns {@code true} if its free variable is contained
- * in given collection.
- *
- * @param c Collection to check for containment.
- * @param <T> Type of the free variable for the predicate and type of the
- * collection elements.
- * @return Predicate (not peer-deployable) that returns {@code true} if its free variable is
- * contained in given collection.
- */
- public static <T> IgnitePredicate<T> in(@Nullable final Collection<? extends T> c) {
- return isEmpty(c) ? GridFunc.<T>alwaysFalse() : new P1<T>() {
- @Override public boolean apply(T t) {
- return c.contains(t);
- }
- };
- }
-
- /**
* Gets predicate that returns {@code true} if its free variable is not
* contained in given collection.
*
@@ -2406,6 +3031,8 @@ public class GridFunc {
public static <T> IgnitePredicate<T> notIn(@Nullable final Collection<? extends T> c) {
return isEmpty(c) ? GridFunc.<T>alwaysTrue() : new P1<T>() {
@Override public boolean apply(T t) {
+ assert c != null;
+
return !c.contains(t);
}
};
@@ -2418,13 +3045,27 @@ public class GridFunc {
*/
@SuppressWarnings("unchecked")
public static <T, C extends Collection<T>> C addAll(C c, Iterable<? extends T> it) {
+ if (it == null)
+ return c;
+
+ if (it instanceof Collection<?>) {
+ c.addAll((Collection<? extends T>)it);
+
+ return c;
+ }
+
+ return addAll(c, it.iterator());
+ }
+
+ /**
+ * @param c Target collection.
+ * @param it Iterator to fetch.
+ * @return Modified target collection.
+ */
+ public static <T, C extends Collection<T>> C addAll(C c, Iterator<? extends T> it) {
if (it != null) {
- if (it instanceof Collection<?>)
- c.addAll((Collection<? extends T>)it);
- else {
- for (T item : it)
- c.add(item);
- }
+ while (it.hasNext())
+ c.add(it.next());
}
return c;
@@ -2501,7 +3142,14 @@ public class GridFunc {
* found (or {@code null} if key is not found and closure is not provided). Note that
* in case when key is not found the default value will be put into the map.
* @throws GridClosureException Thrown in case when callable throws exception.
+ * @see #newLinkedList()
+ * @see #newList()
* @see #newSet()
+ * @see #newMap()
+ * @see #newAtomicLong()
+ * @see #newAtomicInt()
+ * @see #newAtomicRef()
+ * @see #newAtomicBoolean()
*/
@Nullable public static <K, V> V addIfAbsent(Map<? super K, V> map, @Nullable K key,
@Nullable Callable<? extends V> c) {
@@ -2579,34 +3227,79 @@ public class GridFunc {
*
* @param c Collection to call closure over.
* @param f Side-effect only closure to call over the collection.
+ * @param p Optional set of predicates. Only if collection element evaluates
+ * to {@code true} for given predicates the closure will be applied to it.
+ * If no predicates provided - closure will be applied to all collection
+ * elements.
* @param <X> Type of the free variable for the closure and type of the
* collection elements.
*/
- public static <X> void forEach(Iterable<? extends X> c, IgniteInClosure<? super X> f) {
- A.notNull(c, "c", f, "f");
+ public static <X> void forEach(Iterable<? extends X> c, IgniteInClosure<? super X> f,
+ @Nullable IgnitePredicate<? super X>... p) {
+ A.notNull(c, "c", f, "f");
+
+ for (X x : c)
+ if (isAll(x, p))
+ f.apply(x);
+ }
+
+ /**
+ * Calls given {@code side-effect only} closure over the each element of the provided array.
+ *
+ * @param c Array to call closure over.
+ * @param f Side-effect only closure to call over the array.
+ * @param p Optional set of predicates. Only if collection element evaluates
+ * to {@code true} for given predicates the closure will be applied to it.
+ * If no predicates provided - closure will be applied to all collection
+ * elements.
+ * @param <X> Type of the free variable for the closure and type of the array
+ * elements.
+ */
+ @SuppressWarnings("RedundantTypeArguments")
+ public static <X> void forEach(X[] c, IgniteInClosure<? super X> f, @Nullable IgnitePredicate<? super X>... p) {
+ A.notNull(c, "c", f, "f");
+
+ F.<X>forEach(asList(c), f, p);
+ }
+
+ /**
+ * Adds (copies) to given collection all elements in <tt>'from'</tt> array.
+ *
+ * @param to Collection to copy to.
+ * @param from Array to copy from.
+ * @param <T> Type of the free variable for the predicate and type of the collection elements.
+ * @return Collection to copy to.
+ */
+ public static <T> Collection<T> copy(Collection<T> to, T... from) {
+ A.notNull(to, "to", from, "from");
- for (X x : c)
- f.apply(x);
+ copy(to, asList(from));
+
+ return to;
}
/**
- * Calls given {@code side-effect only} closure over the each element of the provided
- * collection.
+ * Adds (copies) to given collection using provided predicates. Element is copied if all
+ * predicates evaluate to {@code true}.
*
- * @param c Collection to call closure over.
- * @param f Side-effect only closure to call over the collection.
- * @param p Optional predicate. Only if collection element evaluates
- * to {@code true} for given predicate the closure will be applied to it.
- * @param <X> Type of the free variable for the closure and type of the
- * collection elements.
+ * @param to Collection to copy to.
+ * @param from Collection to copy from.
+ * @param p Optional set of predicates to use for filtration.
+ * @param <T> Type of the free variable for the predicate and type of the collection elements.
+ * @return Collection to copy to.
*/
- public static <X> void forEach(Iterable<? extends X> c, IgniteInClosure<? super X> f,
- IgnitePredicate<? super X> p) {
- A.notNull(c, "c", f, "f", p, "f");
+ public static <T> Collection<T> copy(Collection<T> to, Iterable<? extends T> from,
+ @Nullable IgnitePredicate<? super T>... p) {
+ A.notNull(to, "to", from, "from");
- for (X x : c)
- if (p.apply(x))
- f.apply(x);
+ if (!isAlwaysFalse(p)) {
+ for (T t : from) {
+ if (isAll(t, p))
+ to.add(t);
+ }
+ }
+
+ return to;
}
/**
@@ -2643,6 +3336,21 @@ public class GridFunc {
}
/**
+ * Transforms an array to read only collection using provided closure.
+ *
+ * @param c Initial array to transform.
+ * @param f Closure to use for transformation.
+ * @param <X> Type of the free variable for the closure and type of the array elements.
+ * @param <Y> Type of the closure's return value.
+ * @return Transformed read only collection.
+ */
+ public static <X, Y> Collection<Y> transform(X[] c, IgniteClosure<? super X, Y> f) {
+ A.notNull(c, "c", f, "f");
+
+ return viewReadOnly(asList(c), f);
+ }
+
+ /**
* Tests if all provided predicates evaluate to {@code true} for given value. Note that
* evaluation will be short-circuit when first predicate evaluated to {@code false} is found.
*
@@ -2653,7 +3361,7 @@ public class GridFunc {
* @return Returns {@code true} if given set of predicates is {@code null}, is empty, or all predicates
* evaluate to {@code true} for given value, {@code false} otherwise.
*/
- public static <T> boolean isAll(@Nullable T t, @Nullable IgnitePredicate<? super T>[] p) {
+ public static <T> boolean isAll(@Nullable T t, @Nullable IgnitePredicate<? super T>... p) {
if (p != null)
for (IgnitePredicate<? super T> r : p)
if (r != null && !r.apply(t))
@@ -2663,6 +3371,27 @@ public class GridFunc {
}
/**
+ * Tests if any of provided predicates evaluate to {@code true} for given value. Note
+ * that evaluation will be short-circuit when first predicate evaluated to {@code true}
+ * is found.
+ *
+ * @param t Value to test.
+ * @param p Optional set of predicates to use for evaluation.
+ * @param <T> Type of the value and free variable of the predicates.
+ * @return Returns {@code true} if any of predicates evaluates to {@code true} for given
+ * value, {@code false} otherwise. Returns {@code false} if given set of predicates
+ * is {@code null} or empty.
+ */
+ public static <T> boolean isAny(@Nullable T t, @Nullable IgnitePredicate<? super T>... p) {
+ if (p != null)
+ for (IgnitePredicate<? super T> r : p)
+ if (r != null && r.apply(t))
+ return true;
+
+ return false;
+ }
+
+ /**
* Creates an absolute (no-arg) closure that does nothing.
*
* @return Absolute (no-arg) closure that does nothing.
@@ -2682,12 +3411,14 @@ public class GridFunc {
* @return First element in given collection for which predicate evaluates to
* {@code true} - or {@code dfltVal} if such element cannot be found.
*/
- @Nullable public static <V> V find(Iterable<? extends V> c, @Nullable V dfltVal, IgnitePredicate<? super V> p) {
+ @SafeVarargs
+ @Nullable public static <V> V find(Iterable<? extends V> c, @Nullable V dfltVal,
+ @Nullable IgnitePredicate<? super V>... p) {
A.notNull(c, "c");
- if (!isAlwaysFalse(p)) {
+ if (!isEmpty(p) && !isAlwaysFalse(p)) {
for (V v : c) {
- if (p.apply(v))
+ if (isAny(v, p))
return v;
}
}
@@ -2696,6 +3427,35 @@ public class GridFunc {
}
/**
+ * Finds, transforms and returns first element in given collection for which any of
+ * the provided predicates evaluates to {@code true}.
+ *
+ * @param c Input collection.
+ * @param dfltVal Default value to return when no element is found.
+ * @param f Transforming closure.
+ * @param p Optional set of finder predicates.
+ * @param <V> Type of the collection elements.
+ * @return First element in given collection for which predicate evaluates to
+ * {@code true} - or {@code null} if such element cannot be found.
+ */
+ public static <V, Y> Y find(Iterable<? extends V> c, @Nullable Y dfltVal, IgniteClosure<? super V, Y> f,
+ @Nullable IgnitePredicate<? super V>... p) {
+ A.notNull(c, "c", f, "f");
+
+ if (isAlwaysTrue(p) && c.iterator().hasNext())
+ return f.apply(c.iterator().next());
+
+ if (!isEmpty(p) && !isAlwaysFalse(p)) {
+ for (V v : c) {
+ if (isAny(v, p))
+ return f.apply(v);
+ }
+ }
+
+ return dfltVal;
+ }
+
+ /**
* Checks if collection {@code c1} contains any elements from collection {@code c2}.
*
* @param c1 Collection to check for containment. If {@code null} - this method returns {@code false}.
@@ -2722,7 +3482,7 @@ public class GridFunc {
* @return {@code true} if collection {@code c1} contains at least one element from collection
* {@code c2}.
*/
- public static <T> boolean containsAny(@Nullable Collection<? extends T> c1, @Nullable T[] c2) {
+ public static <T> boolean containsAny(@Nullable Collection<? extends T> c1, @Nullable T... c2) {
if (c1 != null && !c1.isEmpty() && c2 != null && c2.length > 0)
for (T t : c2)
if (c1.contains(t))
@@ -2766,6 +3526,35 @@ public class GridFunc {
}
/**
+ * Partitions input collection in two: first containing elements for which given
+ * predicate evaluates to {@code true} - and second containing the elements for which
+ * predicate evaluates to {@code false}.
+ *
+ * @param c Input collection.
+ * @param p Partitioning predicate.
+ * @param <V> Type of the collection elements.
+ * @return Tuple of two collections: first containing elements for which given predicate
+ * evaluates to {@code true} - and second containing the elements for which predicate
+ * evaluates to {@code false}.
+ */
+ public static <V> IgniteBiTuple<Collection<V>, Collection<V>> partition(Iterable<? extends V> c,
+ IgnitePredicate<? super V> p) {
+ A.notNull(c, "c", p, "p");
+
+ Collection<V> c1 = new LinkedList<>();
+ Collection<V> c2 = new LinkedList<>();
+
+ for (V v : c) {
+ if (p.apply(v))
+ c1.add(v);
+ else
+ c2.add(v);
+ }
+
+ return t(c1, c2);
+ }
+
+ /**
* Checks for existence of the element in input collection for which all provided predicates
* evaluate to {@code true}.
*
@@ -2775,16 +3564,18 @@ public class GridFunc {
* @return {@code true} if input collection contains element for which all the provided
* predicates evaluates to {@code true} - otherwise returns {@code false}.
*/
- public static <V> boolean exist(Iterable<? extends V> c, IgnitePredicate<? super V> p) {
+ public static <V> boolean exist(Iterable<? extends V> c, @Nullable IgnitePredicate<? super V>... p) {
A.notNull(c, "c");
if (isAlwaysFalse(p))
return false;
else if (isAlwaysTrue(p))
return true;
+ else if (isEmpty(p))
+ return true;
else
for (V v : c)
- if (p.apply(v))
+ if (isAll(v, p))
return true;
return false;
@@ -2801,16 +3592,16 @@ public class GridFunc {
* @return Returns {@code true} if all given predicates evaluate to {@code true} for
* all elements. Returns {@code false} otherwise.
*/
- public static <V> boolean forAll(Iterable<? extends V> c, IgnitePredicate<? super V> p) {
+ public static <V> boolean forAll(Iterable<? extends V> c, @Nullable IgnitePredicate<? super V>... p) {
A.notNull(c, "c");
if (isAlwaysFalse(p))
return false;
else if (isAlwaysTrue(p))
return true;
- else {
+ else if (!isEmpty(p)) {
for (V v : c) {
- if (!p.apply(v))
+ if (!isAll(v, p))
return false;
}
}
@@ -2830,18 +3621,17 @@ public class GridFunc {
* entries. Returns {@code false} otherwise.
*/
public static <K1, K extends K1, V1, V extends V1> boolean forAll(Map<K, V> m,
- IgnitePredicate<? super Map.Entry<K, V>> p) {
+ @Nullable IgnitePredicate<? super Map.Entry<K, V>>... p) {
A.notNull(m, "m");
if (isAlwaysFalse(p))
return false;
else if (isAlwaysTrue(p))
return true;
- else {
+ else if (!isEmpty(p))
for (Map.Entry<K, V> e : m.entrySet())
- if (!p.apply(e))
+ if (!isAll(e, p))
return false;
- }
return true;
}
@@ -2858,16 +3648,20 @@ public class GridFunc {
* @return Returns {@code true} if all given predicates evaluate to {@code true} for
* at least one element. Returns {@code false} otherwise.
*/
- public static <V> boolean forAny(Iterable<? extends V> c, IgnitePredicate<? super V> p) {
+ public static <V> boolean forAny(Iterable<? extends V> c, @Nullable IgnitePredicate<? super V>... p) {
A.notNull(c, "c");
- if (isAlwaysFalse(p))
+ if (!c.iterator().hasNext())
+ return false;
+ else if (isEmpty(p))
+ return true;
+ else if (isAlwaysFalse(p))
return false;
else if (isAlwaysTrue(p))
return true;
else {
for (V v : c)
- if (p.apply(v))
+ if (isAll(v, p))
return true;
return false;
@@ -2897,17 +3691,22 @@ public class GridFunc {
*
* @param c Input collection.
* @param b Optional first folding pair element.
- * @param fs Optional folding closure.
+ * @param fs Optional set of folding closures.
* @param <D> Type of the input collection elements and type of the free variable for the closure.
* @param <B> Type of the folding value and return type of the closure.
* @return Value representing folded collection.
*/
@Nullable public static <D, B> B fold(Iterable<? extends D> c, @Nullable B b,
- IgniteBiClosure<? super D, ? super B, B> fs) {
+ @Nullable IgniteBiClosure<? super D, ? super B, B>... fs) {
A.notNull(c, "c");
- for (D e : c)
- b = fs.apply(e, b);
+ if (!isEmpty(fs))
+ for (D e : c) {
+ assert fs != null;
+
+ for (IgniteBiClosure<? super D, ? super B, B> f : fs)
+ b = f.apply(e, b);
+ }
<TRUNCATED>
[05/11] ignite git commit: Revert "IGNITE-2330: Simplified GridFunc."
Posted by vo...@apache.org.
Revert "IGNITE-2330: Simplified GridFunc."
This reverts commit 80579253febd6389dbb3a84706671cc8083df1f2.
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/ddbe2d59
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/ddbe2d59
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/ddbe2d59
Branch: refs/heads/ignite-2314
Commit: ddbe2d596e5aaf0b04a26c584dfe2ca8a664c570
Parents: cd5cd2e
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Sun Jan 3 23:49:09 2016 +0400
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Sun Jan 3 23:49:09 2016 +0400
----------------------------------------------------------------------
.../apache/ignite/internal/IgniteKernal.java | 27 +-
.../internal/cluster/ClusterGroupAdapter.java | 10 +-
.../discovery/GridDiscoveryManager.java | 19 +-
.../loadbalancer/GridLoadBalancerManager.java | 8 +-
.../processors/cache/GridCacheAdapter.java | 14 +-
.../processors/cache/GridCacheContext.java | 3 +-
.../cache/GridCacheEvictionManager.java | 5 +-
.../processors/cache/GridCacheIoManager.java | 5 +-
.../processors/cache/GridCacheIterator.java | 4 +-
.../processors/cache/GridCacheKeySet.java | 2 +-
.../GridCachePartitionExchangeManager.java | 12 +-
.../processors/cache/GridCacheProcessor.java | 9 +-
.../processors/cache/GridCacheSwapManager.java | 7 +-
.../processors/cache/GridCacheUtils.java | 92 -
.../cache/GridCacheValueCollection.java | 5 +-
.../processors/cache/IgniteCacheProxy.java | 4 +-
.../dht/GridClientPartitionTopology.java | 3 +-
.../dht/GridDhtPartitionTopologyImpl.java | 3 +-
.../dht/GridDhtTransactionalCacheAdapter.java | 3 +-
.../distributed/dht/GridDhtTxLocalAdapter.java | 3 +-
.../distributed/dht/GridDhtTxPrepareFuture.java | 5 +-
.../dht/atomic/GridDhtAtomicCache.java | 21 +-
.../dht/preloader/GridDhtForceKeysFuture.java | 8 +-
.../distributed/near/GridNearCacheAdapter.java | 11 +-
.../local/atomic/GridLocalAtomicCache.java | 17 +-
.../GridCacheAtomicStampedImpl.java | 5 +-
.../internal/processors/igfs/IgfsProcessor.java | 8 +-
.../dotnet/PlatformDotNetCacheStore.java | 4 +-
.../top/GridTopologyCommandHandler.java | 3 +-
.../org/apache/ignite/internal/util/F0.java | 325 +++-
.../internal/util/GridExecutionStatistics.java | 4 +-
.../ignite/internal/util/IgniteUtils.java | 4 +-
.../ignite/internal/util/lang/GridFunc.java | 1764 ++++++++++++++----
.../ignite/internal/util/lang/GridTuple3.java | 1 +
.../ignite/internal/util/lang/GridTuple4.java | 1 +
.../ignite/internal/util/lang/GridTuple5.java | 1 +
.../ignite/internal/util/lang/GridTuple6.java | 1 +
.../ignite/internal/util/lang/GridTupleV.java | 1 +
.../java/org/apache/ignite/lang/IgniteUuid.java | 2 +-
.../memory/MemoryEventStorageSpi.java | 1 -
.../cache/GridCacheLuceneQueryIndexTest.java | 4 +-
.../distributed/GridCacheEventAbstractTest.java | 2 +-
.../IpcSharedMemoryCrashDetectionSelfTest.java | 16 +-
.../ignite/lang/GridBasicPerformanceTest.java | 10 +-
.../ignite/lang/GridFuncPerformanceTest.java | 102 +
.../ignite/loadtest/GridLoadTestStatistics.java | 2 +-
.../query/h2/opt/GridH2TreeIndex.java | 4 +-
47 files changed, 1988 insertions(+), 577 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
index 7fc0313..14b5816 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
@@ -589,16 +589,8 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable {
@Override public List<String> getLifecycleBeansFormatted() {
LifecycleBean[] beans = cfg.getLifecycleBeans();
- if (F.isEmpty(beans))
- return Collections.emptyList();
- else {
- List<String> res = new ArrayList<>(beans.length);
-
- for (LifecycleBean bean : beans)
- res.add(String.valueOf(bean));
-
- return res;
- }
+ return F.isEmpty(beans) ? Collections.<String>emptyList() :
+ (List<String>)F.transform(beans, F.<LifecycleBean>string());
}
/**
@@ -2277,19 +2269,16 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable {
Collection<Object> objs = new ArrayList<>();
if (!F.isEmpty(cfg.getLifecycleBeans()))
- Collections.addAll(objs, cfg.getLifecycleBeans());
+ F.copy(objs, cfg.getLifecycleBeans());
if (!F.isEmpty(cfg.getSegmentationResolvers()))
- Collections.addAll(objs, cfg.getSegmentationResolvers());
+ F.copy(objs, cfg.getSegmentationResolvers());
- if (cfg.getConnectorConfiguration() != null) {
- objs.add(cfg.getConnectorConfiguration().getMessageInterceptor());
- objs.add(cfg.getConnectorConfiguration().getSslContextFactory());
- }
+ if (cfg.getConnectorConfiguration() != null)
+ F.copy(objs, cfg.getConnectorConfiguration().getMessageInterceptor(),
+ cfg.getConnectorConfiguration().getSslContextFactory());
- objs.add(cfg.getMarshaller());
- objs.add(cfg.getGridLogger());
- objs.add(cfg.getMBeanServer());
+ F.copy(objs, cfg.getMarshaller(), cfg.getGridLogger(), cfg.getMBeanServer());
return objs;
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java
index a153b83..75168a1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java
@@ -51,7 +51,6 @@ import org.apache.ignite.internal.IgniteServicesImpl;
import org.apache.ignite.internal.IgnitionEx;
import org.apache.ignite.internal.executor.GridExecutorService;
import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager;
-import org.apache.ignite.internal.util.lang.GridNodePredicate;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.A;
import org.apache.ignite.internal.util.typedef.internal.U;
@@ -368,7 +367,8 @@ public class ClusterGroupAdapter implements ClusterGroupEx, Externalizable {
guard();
try {
- ctx.resource().injectGeneric(p);
+ if (p != null)
+ ctx.resource().injectGeneric(p);
return new ClusterGroupAdapter(ctx, subjId, this.p != null ? F.and(p, this.p) : p);
}
@@ -703,7 +703,6 @@ public class ClusterGroupAdapter implements ClusterGroupEx, Externalizable {
}
/** {@inheritDoc} */
- @SuppressWarnings("unchecked")
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
gridName = U.readString(in);
subjId = U.readUuid(in);
@@ -920,7 +919,7 @@ public class ClusterGroupAdapter implements ClusterGroupEx, Externalizable {
ClusterNode node = isOldest ? U.oldest(super.nodes(), null) : U.youngest(super.nodes(), null);
- IgnitePredicate<ClusterNode> p = new GridNodePredicate(node);
+ IgnitePredicate<ClusterNode> p = F.nodeForNodes(node);
state = new AgeClusterGroupState(node, p, lastTopVer);
}
@@ -962,7 +961,8 @@ public class ClusterGroupAdapter implements ClusterGroupEx, Externalizable {
guard();
try {
- ctx.resource().injectGeneric(p);
+ if (p != null)
+ ctx.resource().injectGeneric(p);
return new ClusterGroupAdapter(ctx, this.subjId, new GroupPredicate(this, p));
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
index 844fd0f..72a2bef 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
@@ -30,6 +30,7 @@ import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -78,6 +79,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
import org.apache.ignite.internal.processors.jobmetrics.GridJobMetrics;
import org.apache.ignite.internal.processors.security.SecurityContext;
import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridBoundedConcurrentOrderedMap;
import org.apache.ignite.internal.util.GridSpinBusyLock;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
@@ -1315,26 +1317,13 @@ public class GridDiscoveryManager extends GridManagerAdapter<DiscoverySpi> {
}
/**
- * Gets collection of node for given node IDs.
- *
- * @param ids Ids to include.
- * @return Collection with all alive nodes for given IDs.
- */
- public Collection<ClusterNode> nodes(@Nullable Collection<UUID> ids) {
- return F.isEmpty(ids) ? Collections.<ClusterNode>emptyList() :
- F.view(
- F.viewReadOnly(ids, U.id2Node(ctx)),
- F.notNull());
- }
-
- /**
* Gets collection of node for given node IDs and predicates.
*
* @param ids Ids to include.
* @param p Filter for IDs.
* @return Collection with all alive nodes for given IDs.
*/
- public Collection<ClusterNode> nodes(@Nullable Collection<UUID> ids, IgnitePredicate<UUID> p) {
+ public Collection<ClusterNode> nodes(@Nullable Collection<UUID> ids, IgnitePredicate<UUID>... p) {
return F.isEmpty(ids) ? Collections.<ClusterNode>emptyList() :
F.view(
F.viewReadOnly(ids, U.id2Node(ctx), p),
@@ -2576,7 +2565,7 @@ public class GridDiscoveryManager extends GridManagerAdapter<DiscoverySpi> {
nearEnabledCaches = Collections.unmodifiableSet(nearEnabledSet);
daemonNodes = Collections.unmodifiableList(new ArrayList<>(
- F.view(F.concat(false, loc, rmts), F.not(daemonFilter))));
+ F.view(F.concat(false, loc, rmts), F0.not(daemonFilter))));
Map<UUID, ClusterNode> nodeMap = new HashMap<>(allNodes().size() + daemonNodes.size(), 1.0f);
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/managers/loadbalancer/GridLoadBalancerManager.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/loadbalancer/GridLoadBalancerManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/loadbalancer/GridLoadBalancerManager.java
index efe09b0..631168b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/loadbalancer/GridLoadBalancerManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/loadbalancer/GridLoadBalancerManager.java
@@ -18,7 +18,6 @@
package org.apache.ignite.internal.managers.loadbalancer;
import java.util.Collection;
-import java.util.LinkedList;
import java.util.List;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
@@ -93,12 +92,7 @@ public class GridLoadBalancerManager extends GridManagerAdapter<LoadBalancingSpi
if (F.isEmpty(exclNodes))
return GridLoadBalancerManager.this.getBalancedNode(ses, top, job);
- List<ClusterNode> nodes = new LinkedList<>();
-
- for (ClusterNode topNode : top) {
- if (!exclNodes.contains(topNode))
- nodes.add(topNode);
- }
+ List<ClusterNode> nodes = F.loseList(top, true, exclNodes);
if (nodes.isEmpty())
return null;
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
index a4cf737..5d4c386 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
@@ -106,6 +106,7 @@ import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.lang.GridClosureException;
import org.apache.ignite.internal.util.lang.GridTriple;
import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.C1;
import org.apache.ignite.internal.util.typedef.C2;
import org.apache.ignite.internal.util.typedef.CI1;
import org.apache.ignite.internal.util.typedef.CI2;
@@ -2149,7 +2150,12 @@ public abstract class GridCacheAdapter<K, V> implements IgniteInternalCache<K, V
return syncOp(new SyncOp<Map<K, EntryProcessorResult<T>>>(keys.size() == 1) {
@Nullable @Override public Map<K, EntryProcessorResult<T>> op(IgniteTxLocalAdapter tx)
throws IgniteCheckedException {
- Map<? extends K, EntryProcessor<K, V, Object>> invokeMap = CU.invokeMap(keys, entryProcessor);
+ Map<? extends K, EntryProcessor<K, V, Object>> invokeMap = F.viewAsMap(keys,
+ new C1<K, EntryProcessor<K, V, Object>>() {
+ @Override public EntryProcessor apply(K k) {
+ return entryProcessor;
+ }
+ });
IgniteInternalFuture<GridCacheReturn> fut = tx.invokeAsync(ctx, invokeMap, args);
@@ -2216,7 +2222,11 @@ public abstract class GridCacheAdapter<K, V> implements IgniteInternalCache<K, V
IgniteInternalFuture<?> fut = asyncOp(new AsyncInOp(keys) {
@Override public IgniteInternalFuture<GridCacheReturn> inOp(IgniteTxLocalAdapter tx) {
- Map<? extends K, EntryProcessor<K, V, Object>> invokeMap = CU.invokeMap(keys, entryProcessor);
+ Map<? extends K, EntryProcessor<K, V, Object>> invokeMap = F.viewAsMap(keys, new C1<K, EntryProcessor<K, V, Object>>() {
+ @Override public EntryProcessor apply(K k) {
+ return entryProcessor;
+ }
+ });
return tx.invokeAsync(ctx, invokeMap, args);
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
index ef49567..c10ebf3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
@@ -85,6 +85,7 @@ import org.apache.ignite.internal.processors.closure.GridClosureProcessor;
import org.apache.ignite.internal.processors.offheap.GridOffHeapProcessor;
import org.apache.ignite.internal.processors.plugin.CachePluginManager;
import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.lang.GridFunc;
import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
import org.apache.ignite.internal.util.tostring.GridToStringExclude;
@@ -1511,7 +1512,7 @@ public class GridCacheContext<K, V> implements Externalizable {
Collection<ClusterNode> nearNodes = null;
if (!F.isEmpty(readers)) {
- nearNodes = discovery().nodes(readers, F.notEqualTo(nearNodeId));
+ nearNodes = discovery().nodes(readers, F0.notEqualTo(nearNodeId));
if (log.isDebugEnabled())
log.debug("Mapping entry to near nodes [nodes=" + U.nodeIds(nearNodes) + ", entry=" + entry + ']');
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java
index f542f4b..845e204 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java
@@ -63,6 +63,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.apache.ignite.internal.processors.timeout.GridTimeoutObject;
import org.apache.ignite.internal.processors.timeout.GridTimeoutObjectAdapter;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridBusyLock;
import org.apache.ignite.internal.util.GridConcurrentHashSet;
import org.apache.ignite.internal.util.GridUnsafe;
@@ -1309,7 +1310,7 @@ public class GridCacheEvictionManager extends GridCacheManagerAdapter {
try {
GridCacheVersion ver = e.version();
- return info.version().equals(ver) && F.isAll(e, info.filter());
+ return info.version().equals(ver) && F.isAll(info.filter());
}
catch (GridCacheEntryRemovedException ignored) {
return false;
@@ -1340,7 +1341,7 @@ public class GridCacheEvictionManager extends GridCacheManagerAdapter {
Collection<ClusterNode> backups;
if (evictSync)
- backups = F.view(cctx.dht().topology().nodes(entry.partition(), topVer), F.notEqualTo(cctx.localNode()));
+ backups = F.view(cctx.dht().topology().nodes(entry.partition(), topVer), F0.notEqualTo(cctx.localNode()));
else
backups = Collections.emptySet();
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java
index 4c9cdf2..0aa8b1b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java
@@ -57,6 +57,7 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPr
import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareResponse;
import org.apache.ignite.internal.processors.cache.query.GridCacheQueryRequest;
import org.apache.ignite.internal.processors.cache.query.GridCacheQueryResponse;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridLeanSet;
import org.apache.ignite.internal.util.GridSpinReadWriteLock;
import org.apache.ignite.internal.util.typedef.CI1;
@@ -730,7 +731,7 @@ public class GridCacheIoManager extends GridCacheSharedManagerAdapter {
}
if (added) {
- if (!F.exist(F.nodeIds(nodes), F.not(F.contains(leftIds)))) {
+ if (!F.exist(F.nodeIds(nodes), F0.not(F.contains(leftIds)))) {
if (log.isDebugEnabled())
log.debug("Message will not be sent because all nodes left topology [msg=" + msg +
", nodes=" + U.toShortString(nodes) + ']');
@@ -766,7 +767,7 @@ public class GridCacheIoManager extends GridCacheSharedManagerAdapter {
U.sleep(retryDelay);
}
- if (!F.exist(F.nodeIds(nodes), F.not(F.contains(leftIds)))) {
+ if (!F.exist(F.nodeIds(nodes), F0.not(F.contains(leftIds)))) {
if (log.isDebugEnabled())
log.debug("Message will not be sent because all nodes left topology [msg=" + msg + ", nodes=" +
U.toShortString(nodes) + ']');
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIterator.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIterator.java
index e4a471c..19da4a8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIterator.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIterator.java
@@ -19,8 +19,6 @@ package org.apache.ignite.internal.processors.cache;
import java.util.Iterator;
import javax.cache.Cache;
-
-import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridSerializableIterator;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteClosure;
@@ -59,7 +57,7 @@ public class GridCacheIterator<K, V, T> implements GridSerializableIterator<T> {
) {
this.cctx = cctx;
- it = F.identityIterator(c, F0.and(filter));
+ it = F.iterator0(c, false, filter);
this.trans = trans;
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheKeySet.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheKeySet.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheKeySet.java
index af0c704..6d18b7d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheKeySet.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheKeySet.java
@@ -66,7 +66,7 @@ public class GridCacheKeySet<K, V> extends GridSerializableSet<K> {
/** {@inheritDoc} */
@Override public Iterator<K> iterator() {
- return new GridCacheIterator<>(ctx, map.values(), CU.<K, V>cacheEntry2Key(), filter);
+ return new GridCacheIterator<>(ctx, map.values(), F.<K, V>cacheEntry2Key(), filter);
}
/** {@inheritDoc} */
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java
index 4732597..a0f7f93 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java
@@ -80,7 +80,6 @@ import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.internal.util.worker.GridWorker;
import org.apache.ignite.lang.IgniteBiInClosure;
-import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.lang.IgniteProductVersion;
import org.apache.ignite.lang.IgniteUuid;
import org.apache.ignite.thread.IgniteThread;
@@ -1271,16 +1270,9 @@ public class GridCachePartitionExchangeManager<K, V> extends GridCacheSharedMana
}
// After workers line up and before preloading starts we initialize all futures.
- if (log.isDebugEnabled()) {
- IgnitePredicate p = new IgnitePredicate<IgniteInternalFuture<?>>() {
- @Override public boolean apply(IgniteInternalFuture<?> f) {
- return !f.isDone();
- }
- };
-
+ if (log.isDebugEnabled())
log.debug("Before waiting for exchange futures [futs" +
- F.view(exchFuts.values(), p) + ", worker=" + this + ']');
- }
+ F.view(exchFuts.values(), F.unfinishedFutures()) + ", worker=" + this + ']');
// Take next exchange future.
exchFut = poll(futQ, timeout, this);
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
index f6fc9d4..ff02e70 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
@@ -96,6 +96,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersionManager;
import org.apache.ignite.internal.processors.plugin.CachePluginManager;
import org.apache.ignite.internal.processors.query.GridQueryProcessor;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.future.GridCompoundFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
@@ -1159,12 +1160,8 @@ public class GridCacheProcessor extends GridProcessorAdapter {
log.debug("Executed onKernalStart() callback for DHT cache: " + dht.name());
}
- Collection<GridCacheManager> excluded = dhtExcludes(ctx);
-
- for (GridCacheManager mgr : ctx.managers()) {
- if (!excluded.contains(mgr))
- mgr.onKernalStart();
- }
+ for (GridCacheManager mgr : F.view(ctx.managers(), F0.notContains(dhtExcludes(ctx))))
+ mgr.onKernalStart();
cache.onKernalStart();
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java
index 4490f63..37b5e15 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java
@@ -42,6 +42,7 @@ import org.apache.ignite.internal.processors.offheap.GridOffHeapProcessor;
import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
import org.apache.ignite.internal.util.GridConcurrentHashSet;
import org.apache.ignite.internal.util.GridEmptyCloseableIterator;
+import org.apache.ignite.internal.util.GridEmptyIterator;
import org.apache.ignite.internal.util.GridWeakIterator;
import org.apache.ignite.internal.util.lang.GridCloseableIterator;
import org.apache.ignite.internal.util.lang.GridTuple;
@@ -1580,7 +1581,7 @@ public class GridCacheSwapManager extends GridCacheManagerAdapter {
*/
public <K, V> Iterator<Map.Entry<K, V>> lazySwapIterator() throws IgniteCheckedException {
if (!swapEnabled)
- return F.emptyIterator();
+ return new GridEmptyIterator<>();
return lazyIterator(cctx.gridSwap().rawIterator(spaceName));
}
@@ -1671,7 +1672,7 @@ public class GridCacheSwapManager extends GridCacheManagerAdapter {
private <K, V> Iterator<Map.Entry<K, V>> lazyIterator(
final GridCloseableIterator<? extends Map.Entry<byte[], byte[]>> it) {
if (it == null)
- return F.emptyIterator();
+ return new GridEmptyIterator<>();
checkIteratorQueue();
@@ -1742,7 +1743,7 @@ public class GridCacheSwapManager extends GridCacheManagerAdapter {
private Iterator<KeyCacheObject> keyIterator(
final GridCloseableIterator<? extends Map.Entry<byte[], byte[]>> it) {
if (it == null)
- return F.emptyIterator();
+ return new GridEmptyIterator<>();
checkIteratorQueue();
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java
index b7baf7e..51f6dcd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java
@@ -43,8 +43,6 @@ import javax.cache.configuration.Factory;
import javax.cache.expiry.Duration;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.integration.CacheWriterException;
-import javax.cache.processor.EntryProcessor;
-
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
@@ -286,41 +284,6 @@ public class GridCacheUtils {
}
};
- /** */
- private static final IgniteClosure CACHE_ENTRY_KEY = new IgniteClosure() {
- @Override public Object apply(Object o) {
- return ((Cache.Entry)o).getKey();
- }
-
- @Override public String toString() {
- return "Map entry to key transformer closure.";
- }
- };
-
- /** */
- private static final IgniteClosure CACHE_ENTRY_VAL_GET = new IgniteClosure() {
- @SuppressWarnings({"unchecked"})
- @Nullable @Override public Object apply(Object o) {
- return ((Cache.Entry)o).getValue();
- }
-
- @Override public String toString() {
- return "Cache entry to get-value transformer closure.";
- }
- };
-
- /** */
- private static final IgnitePredicate CACHE_ENTRY_HAS_PEEK_VAL = new IgnitePredicate() {
- @SuppressWarnings({"unchecked"})
- @Override public boolean apply(Object o) {
- return ((Cache.Entry)o).getValue() != null;
- }
-
- @Override public String toString() {
- return "Cache entry has-peek-value predicate.";
- }
- };
-
/**
* Ensure singleton.
*/
@@ -1900,59 +1863,4 @@ public class GridCacheUtils {
return res;
}
-
- /**
- * Create invoke map for the given key set. All provided values will be set to the passed entry processor.
- *
- * @param keys Keys.
- * @param entryProc Entry processor.
- * @return Invoke map.
- */
- @SuppressWarnings("unchecked")
- public static <K, V, T> Map<? extends K, EntryProcessor<K, V, Object>> invokeMap(
- final Set<? extends K> keys, final EntryProcessor<K, V, T> entryProc) {
- return F.viewAsMap(keys,
- new C1<K, EntryProcessor<K, V, Object>>() {
- @Override public EntryProcessor apply(K k) {
- return entryProc;
- }
- });
- }
-
- /**
- * Gets closure that returns key for cache entry. The closure internally
- * delegates to {@link javax.cache.Cache.Entry#getKey()} method.
- *
- * @param <K> Key type.
- * @return Closure that returns key for an entry.
- */
- @SuppressWarnings({"unchecked"})
- public static <K, V> IgniteClosure<Cache.Entry<K, V>, K> cacheEntry2Key() {
- return (IgniteClosure<Cache.Entry<K, V>, K>)CACHE_ENTRY_KEY;
- }
-
- /**
- * Gets closure that returns value for an entry. The closure internally
- * delegates to {@link javax.cache.Cache.Entry#get(Object)} method.
- *
- * @param <K> Key type.
- * @param <V> Value type.
- * @return Closure that returns value for an entry.
- */
- @SuppressWarnings({"unchecked"})
- public static <K, V> IgniteClosure<Cache.Entry<K, V>, V> cacheEntry2Get() {
- return (IgniteClosure<Cache.Entry<K, V>, V>)CACHE_ENTRY_VAL_GET;
- }
-
- /**
- * Gets predicate which returns {@code true} if entry has peek value.
- *
- * @param <K> Cache key type.
- * @param <V> Cache value type.
- * @return Predicate which returns {@code true} if entry has peek value.
- */
- @SuppressWarnings({"unchecked"})
- public static <K, V> IgnitePredicate<Cache.Entry<K, V>> cacheHasPeekValue() {
- return (IgnitePredicate<Cache.Entry<K, V>>)CACHE_ENTRY_HAS_PEEK_VAL;
- }
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheValueCollection.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheValueCollection.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheValueCollection.java
index 68b54b3..2d1686e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheValueCollection.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheValueCollection.java
@@ -27,7 +27,6 @@ import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridSerializableCollection;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.lang.IgnitePredicate;
import org.jetbrains.annotations.Nullable;
@@ -73,8 +72,8 @@ public class GridCacheValueCollection<K, V> extends GridSerializableCollection<V
return new GridCacheIterator<K, V, V>(
ctx,
map.values(),
- CU.<K, V>cacheEntry2Get(),
- ctx.vararg(F0.and(filter, CU.<K, V>cacheHasPeekValue()))
+ F.<K, V>cacheEntry2Get(),
+ ctx.vararg(F0.and(filter, F.<K, V>cacheHasPeekValue()))
) {
{
advance();
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java
index a6e0ea8..27a7587 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java
@@ -69,6 +69,7 @@ import org.apache.ignite.internal.processors.cache.query.CacheQuery;
import org.apache.ignite.internal.processors.cache.query.CacheQueryFuture;
import org.apache.ignite.internal.processors.query.GridQueryProcessor;
import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
+import org.apache.ignite.internal.util.GridEmptyIterator;
import org.apache.ignite.internal.util.future.IgniteFutureImpl;
import org.apache.ignite.internal.util.lang.GridClosureException;
import org.apache.ignite.internal.util.lang.IgniteOutClosureX;
@@ -76,7 +77,6 @@ import org.apache.ignite.internal.util.tostring.GridToStringExclude;
import org.apache.ignite.internal.util.tostring.GridToStringInclude;
import org.apache.ignite.internal.util.typedef.CI1;
import org.apache.ignite.internal.util.typedef.CX1;
-import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.A;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.S;
@@ -579,7 +579,7 @@ public class IgniteCacheProxy<K, V> extends AsyncSupportAdapter<IgniteCache<K, V
return new QueryCursor<Cache.Entry<K, V>>() {
@Override public Iterator<Cache.Entry<K, V>> iterator() {
- return cur != null ? cur.iterator() : F.<Cache.Entry<K, V>>emptyIterator();
+ return cur != null ? cur.iterator() : new GridEmptyIterator<Cache.Entry<K, V>>();
}
@Override public List<Cache.Entry<K, V>> getAll() {
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java
index 9c18ad0..8aef5ad 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java
@@ -37,6 +37,7 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.Gri
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap2;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridAtomicLong;
import org.apache.ignite.internal.util.tostring.GridToStringExclude;
import org.apache.ignite.internal.util.typedef.F;
@@ -697,7 +698,7 @@ public class GridClientPartitionTopology implements GridDhtPartitionTopology {
// Remove obsolete mappings.
if (cur != null) {
- for (Integer p : F.view(cur.keySet(), F.notIn(parts.keySet()))) {
+ for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
Set<UUID> ids = part2node.get(p);
if (ids != null)
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
index cdf0fc2..a0709c5 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
@@ -41,6 +41,7 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.Gri
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap2;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridAtomicLong;
import org.apache.ignite.internal.util.tostring.GridToStringExclude;
import org.apache.ignite.internal.util.typedef.F;
@@ -1054,7 +1055,7 @@ class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
// Remove obsolete mappings.
if (cur != null) {
- for (Integer p : F.view(cur.keySet(), F.notIn(parts.keySet()))) {
+ for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
Set<UUID> ids = part2node.get(p);
if (ids != null)
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java
index 2b40436..ae24ed1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java
@@ -60,6 +60,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridLeanSet;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.lang.GridClosureException;
@@ -1381,7 +1382,7 @@ public abstract class GridDhtTransactionalCacheAdapter<K, V> extends GridDhtCach
Collection<ClusterNode> nearNodes = null;
if (!F.isEmpty(readers)) {
- nearNodes = ctx.discovery().nodes(readers, F.not(F.idForNodeId(nodeId)));
+ nearNodes = ctx.discovery().nodes(readers, F0.not(F.idForNodeId(nodeId)));
if (log.isDebugEnabled())
log.debug("Mapping entry to near nodes [nodes=" + U.toShortString(nearNodes) + ", entry=" + cached +
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java
index 1fe3ae2..534a560 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java
@@ -42,6 +42,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx
import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalAdapter;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridLeanMap;
import org.apache.ignite.internal.util.GridLeanSet;
import org.apache.ignite.internal.util.future.GridEmbeddedFuture;
@@ -640,7 +641,7 @@ public abstract class GridDhtTxLocalAdapter extends IgniteTxLocalAdapter {
// Otherwise, during rollback we will not know whether locks need
// to be rolled back.
// Loose all skipped and previously locked (we cannot reenter locks here).
- final Collection<KeyCacheObject> passedKeys = skipped != null ? F.view(keys, F.notIn(skipped)) : keys;
+ final Collection<KeyCacheObject> passedKeys = skipped != null ? F.view(keys, F0.notIn(skipped)) : keys;
if (log.isDebugEnabled())
log.debug("Lock keys: " + passedKeys);
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
index 23fdbf5..d8b2f37 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
@@ -61,6 +61,7 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.apache.ignite.internal.processors.dr.GridDrType;
import org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException;
import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridLeanSet;
import org.apache.ignite.internal.util.future.GridCompoundFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
@@ -1303,14 +1304,14 @@ public final class GridDhtTxPrepareFuture extends GridCompoundFuture<IgniteInter
if (!F.isEmpty(readers)) {
Collection<ClusterNode> nearNodes =
- cctx.discovery().nodes(readers, F.not(F.idForNodeId(tx.nearNodeId())));
+ cctx.discovery().nodes(readers, F0.not(F.idForNodeId(tx.nearNodeId())));
if (log.isDebugEnabled())
log.debug("Mapping entry to near nodes [nodes=" + U.toShortString(nearNodes) +
", entry=" + entry + ']');
// Exclude DHT nodes.
- map(entry, F.view(nearNodes, F.notIn(dhtNodes)), nearMap);
+ map(entry, F.view(nearNodes, F0.notIn(dhtNodes)), nearMap);
}
else if (log.isDebugEnabled())
log.debug("Entry has no near readers: " + entry);
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java
index e0b108b..393413e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java
@@ -77,10 +77,12 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSing
import org.apache.ignite.internal.processors.cache.dr.GridCacheDrExpirationInfo;
import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo;
import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersionEx;
import org.apache.ignite.internal.processors.timeout.GridTimeoutObject;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridUnsafe;
import org.apache.ignite.internal.util.future.GridEmbeddedFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
@@ -410,9 +412,9 @@ public class GridDhtAtomicCache<K, V> extends GridDhtCacheAdapter<K, V> {
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public IgniteInternalFuture<V> getAndPutAsync0(K key, V val, @Nullable CacheEntryPredicate... filter) {
- A.notNull(key, "key", val, "val");
+ A.notNull(key, "key");
- return updateAllAsync0(F.asMap(key, val),
+ return updateAllAsync0(F0.asMap(key, val),
null,
null,
null,
@@ -426,9 +428,9 @@ public class GridDhtAtomicCache<K, V> extends GridDhtCacheAdapter<K, V> {
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public IgniteInternalFuture<Boolean> putAsync0(K key, V val, @Nullable CacheEntryPredicate... filter) {
- A.notNull(key, "key", val, "val");
+ A.notNull(key, "key");
- return updateAllAsync0(F.asMap(key, val),
+ return updateAllAsync0(F0.asMap(key, val),
null,
null,
null,
@@ -443,7 +445,7 @@ public class GridDhtAtomicCache<K, V> extends GridDhtCacheAdapter<K, V> {
@Override public V tryPutIfAbsent(K key, V val) throws IgniteCheckedException {
A.notNull(key, "key", val, "val");
- return (V)updateAllAsync0(F.asMap(key, val),
+ return (V)updateAllAsync0(F0.asMap(key, val),
null,
null,
null,
@@ -793,13 +795,18 @@ public class GridDhtAtomicCache<K, V> extends GridDhtCacheAdapter<K, V> {
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public <T> IgniteInternalFuture<Map<K, EntryProcessorResult<T>>> invokeAllAsync(Set<? extends K> keys,
- EntryProcessor<K, V, T> entryProcessor, Object... args) {
+ final EntryProcessor<K, V, T> entryProcessor,
+ Object... args) {
A.notNull(keys, "keys", entryProcessor, "entryProcessor");
if (keyCheck)
validateCacheKeys(keys);
- Map<? extends K, EntryProcessor<K, V, Object>> invokeMap = CU.invokeMap(keys, entryProcessor);
+ Map<? extends K, EntryProcessor> invokeMap = F.viewAsMap(keys, new C1<K, EntryProcessor>() {
+ @Override public EntryProcessor apply(K k) {
+ return entryProcessor;
+ }
+ });
CacheOperationContext opCtx = ctx.operationContextPerCall();
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java
index 6b03131..7970a44 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java
@@ -43,6 +43,7 @@ import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtFuture;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridLeanSet;
import org.apache.ignite.internal.util.future.GridCompoundFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
@@ -520,11 +521,8 @@ public final class GridDhtForceKeysFuture<K, V> extends GridCompoundFuture<Objec
if (!cctx.rebalanceEnabled()) {
Collection<KeyCacheObject> retryKeys = F.view(
keys,
- F.and(
- F.notIn(missedKeys),
- F.notIn(F.viewReadOnly(res.forcedInfos(), CU.<KeyCacheObject, V>info2Key()))
- )
- );
+ F0.notIn(missedKeys),
+ F0.notIn(F.viewReadOnly(res.forcedInfos(), CU.<KeyCacheObject, V>info2Key())));
if (!retryKeys.isEmpty())
map(retryKeys, F.concat(false, node, exc));
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java
index 5c48dc7..5bf18d9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java
@@ -64,7 +64,6 @@ import org.apache.ignite.internal.util.typedef.C1;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.P1;
import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.lang.IgniteBiPredicate;
import org.jetbrains.annotations.NotNull;
@@ -417,7 +416,7 @@ public abstract class GridNearCacheAdapter<K, V> extends GridDistributedCacheAda
/** {@inheritDoc} */
@Override public Collection<V> values(CacheEntryPredicate... filter) {
- return new GridCacheValueCollection<>(ctx, entrySet(filter), ctx.vararg(CU.<K, V>cacheHasPeekValue()));
+ return new GridCacheValueCollection<>(ctx, entrySet(filter), ctx.vararg(F.<K, V>cacheHasPeekValue()));
}
/** {@inheritDoc} */
@@ -559,12 +558,12 @@ public abstract class GridNearCacheAdapter<K, V> extends GridDistributedCacheAda
/** {@inheritDoc} */
@NotNull @Override public Iterator<Cache.Entry<K, V>> iterator() {
return new EntryIterator(nearSet.iterator(),
- F.identityIterator(dhtSet, new P1<Cache.Entry<K, V>>() {
- @Override
- public boolean apply(Cache.Entry<K, V> e) {
+ F.iterator0(dhtSet, false, new P1<Cache.Entry<K, V>>() {
+ @Override public boolean apply(Cache.Entry<K, V> e) {
try {
return GridNearCacheAdapter.super.localPeek(e.getKey(), NEAR_PEEK_MODE, null) == null;
- } catch (IgniteCheckedException ex) {
+ }
+ catch (IgniteCheckedException ex) {
throw new IgniteException(ex);
}
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
index 3b60f96..6130ead 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
@@ -60,6 +60,7 @@ import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.local.GridLocalCacheEntry;
import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridUnsafe;
import org.apache.ignite.internal.util.future.GridEmbeddedFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
@@ -188,7 +189,7 @@ public class GridLocalAtomicCache<K, V> extends GridCacheAdapter<K, V> {
@Override public IgniteInternalFuture<V> getAndPutAsync0(K key, V val, @Nullable CacheEntryPredicate... filter) {
A.notNull(key, "key", val, "val");
- return updateAllAsync0(F.asMap(key, val),
+ return updateAllAsync0(F0.asMap(key, val),
null,
null,
true,
@@ -201,7 +202,7 @@ public class GridLocalAtomicCache<K, V> extends GridCacheAdapter<K, V> {
@Override public IgniteInternalFuture<Boolean> putAsync0(K key, V val, @Nullable CacheEntryPredicate... filter) {
A.notNull(key, "key", val, "val");
- return updateAllAsync0(F.asMap(key, val),
+ return updateAllAsync0(F0.asMap(key, val),
null,
null,
false,
@@ -658,7 +659,11 @@ public class GridLocalAtomicCache<K, V> extends GridCacheAdapter<K, V> {
if (keyCheck)
validateCacheKeys(keys);
- Map<? extends K, EntryProcessor<K, V, Object>> invokeMap = CU.invokeMap(keys, entryProcessor);
+ Map<? extends K, EntryProcessor> invokeMap = F.viewAsMap(keys, new C1<K, EntryProcessor>() {
+ @Override public EntryProcessor apply(K k) {
+ return entryProcessor;
+ }
+ });
CacheOperationContext opCtx = ctx.operationContextPerCall();
@@ -724,7 +729,11 @@ public class GridLocalAtomicCache<K, V> extends GridCacheAdapter<K, V> {
if (keyCheck)
validateCacheKeys(keys);
- Map<? extends K, EntryProcessor<K, V, Object>> invokeMap = CU.invokeMap(keys, entryProcessor);
+ Map<? extends K, EntryProcessor> invokeMap = F.viewAsMap(keys, new C1<K, EntryProcessor>() {
+ @Override public EntryProcessor apply(K k) {
+ return entryProcessor;
+ }
+ });
return updateAllAsync0(null,
invokeMap,
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheAtomicStampedImpl.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheAtomicStampedImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheAtomicStampedImpl.java
index 84c4269..f7a82a9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheAtomicStampedImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheAtomicStampedImpl.java
@@ -30,6 +30,7 @@ import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
+import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.tostring.GridToStringBuilder;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.internal.CU;
@@ -178,8 +179,8 @@ public final class GridCacheAtomicStampedImpl<T, S> implements GridCacheAtomicSt
checkRemoved();
try {
- return CU.outTx(internalCompareAndSet(F.equalTo(expVal), wrapperClosure(newVal),
- F.equalTo(expStamp), wrapperClosure(newStamp)), ctx);
+ return CU.outTx(internalCompareAndSet(F0.equalTo(expVal), wrapperClosure(newVal),
+ F0.equalTo(expStamp), wrapperClosure(newStamp)), ctx);
}
catch (IgniteCheckedException e) {
throw U.convertException(e);
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java
index b313084..5b8cf86 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java
@@ -45,6 +45,7 @@ import org.apache.ignite.internal.IgniteNodeAttributes;
import org.apache.ignite.internal.processors.query.GridQueryProcessor;
import org.apache.ignite.internal.util.ipc.IpcServerEndpoint;
import org.apache.ignite.internal.util.typedef.C1;
+import org.apache.ignite.internal.util.typedef.CI1;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.X;
import org.apache.ignite.internal.util.typedef.internal.U;
@@ -127,8 +128,11 @@ public class IgfsProcessor extends IgfsProcessorAdapter {
final Map<String, CacheConfiguration> cacheCfgs = new HashMap<>();
- for (CacheConfiguration c : gridCfg.getCacheConfiguration())
- cacheCfgs.put(c.getName(), c);
+ F.forEach(gridCfg.getCacheConfiguration(), new CI1<CacheConfiguration>() {
+ @Override public void apply(CacheConfiguration c) {
+ cacheCfgs.put(c.getName(), c);
+ }
+ });
Collection<IgfsAttributes> attrVals = new ArrayList<>();
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetCacheStore.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetCacheStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetCacheStore.java
index 9f0fd3f..7e65c22 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetCacheStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetCacheStore.java
@@ -235,13 +235,13 @@ public class PlatformDotNetCacheStore<K, V> implements CacheStore<K, V>, Platfor
@Override public Set<Entry<K, V>> entrySet() {
return new AbstractSet<Entry<K, V>>() {
@Override public Iterator<Entry<K, V>> iterator() {
- return F.iteratorReadOnly(entries, new C1<Cache.Entry<? extends K, ? extends V>, Entry<K, V>>() {
+ return F.iterator(entries, new C1<Cache.Entry<? extends K, ? extends V>, Entry<K, V>>() {
private static final long serialVersionUID = 0L;
@Override public Entry<K, V> apply(Cache.Entry<? extends K, ? extends V> entry) {
return new GridMapEntry<>(entry.getKey(), entry.getValue());
}
- });
+ }, true);
}
@Override public int size() {
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/top/GridTopologyCommandHandler.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/top/GridTopologyCommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/top/GridTopologyCommandHandler.java
index 5e12199..297785e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/top/GridTopologyCommandHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/top/GridTopologyCommandHandler.java
@@ -135,7 +135,8 @@ public class GridTopologyCommandHandler extends GridRestCommandHandlerAdapter {
}
else
node = F.find(ctx.discovery().allNodes(), null, new P1<ClusterNode>() {
- @Override public boolean apply(ClusterNode n) {
+ @Override
+ public boolean apply(ClusterNode n) {
return containsIp(n.addresses(), ip);
}
});
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/F0.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/F0.java b/modules/core/src/main/java/org/apache/ignite/internal/util/F0.java
index b819226..7e9e448 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/F0.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/F0.java
@@ -17,6 +17,8 @@
package org.apache.ignite.internal.util;
+import java.util.Collection;
+import java.util.Map;
import java.util.Set;
import java.util.UUID;
import org.apache.ignite.IgniteCheckedException;
@@ -25,9 +27,11 @@ import org.apache.ignite.internal.processors.cache.CacheEntryPredicateAdapter;
import org.apache.ignite.internal.processors.cache.CacheEntrySerializablePredicate;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
+import org.apache.ignite.internal.util.lang.GridFunc;
import org.apache.ignite.internal.util.lang.GridNodePredicate;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.P1;
+import org.apache.ignite.internal.util.typedef.internal.A;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.lang.IgnitePredicate;
import org.jetbrains.annotations.Nullable;
@@ -38,6 +42,79 @@ import org.jetbrains.annotations.Nullable;
*/
public class F0 {
/**
+ * Negates given predicates.
+ * <p>
+ * Gets predicate (not peer-deployable) that evaluates to {@code true} if any of given predicates
+ * evaluates to {@code false}. If all predicates evaluate to {@code true} the
+ * result predicate will evaluate to {@code false}.
+ *
+ * @param p Predicate to negate.
+ * @param <T> Type of the free variable, i.e. the element the predicate is called on.
+ * @return Negated predicate (not peer-deployable).
+ */
+ public static <T> IgnitePredicate<T> not(@Nullable final IgnitePredicate<? super T>... p) {
+ return F.isAlwaysFalse(p) ? F.<T>alwaysTrue() : F.isAlwaysTrue(p) ? F.<T>alwaysFalse() : new P1<T>() {
+ @Override public boolean apply(T t) {
+ return !F.isAll(t, p);
+ }
+ };
+ }
+
+ /**
+ * Gets predicate (not peer-deployable) that evaluates to {@code true} if its free variable is not equal
+ * to {@code target} or both are {@code null}.
+ *
+ * @param target Object to compare free variable to.
+ * @param <T> Type of the free variable, i.e. the element the predicate is called on.
+ * @return Predicate (not peer-deployable) that evaluates to {@code true} if its free variable is not equal
+ * to {@code target} or both are {@code null}.
+ */
+ public static <T> IgnitePredicate<T> notEqualTo(@Nullable final T target) {
+ return new P1<T>() {
+ @Override public boolean apply(T t) {
+ return !F.eq(t, target);
+ }
+ };
+ }
+
+ /**
+ * Gets predicate (not peer-deployable) that returns {@code true} if its free variable
+ * is not contained in given collection.
+ *
+ * @param c Collection to check for containment.
+ * @param <T> Type of the free variable for the predicate and type of the
+ * collection elements.
+ * @return Predicate (not peer-deployable) that returns {@code true} if its free variable is not
+ * contained in given collection.
+ */
+ public static <T> IgnitePredicate<T> notIn(@Nullable final Collection<? extends T> c) {
+ return F.isEmpty(c) ? GridFunc.<T>alwaysTrue() : new P1<T>() {
+ @Override public boolean apply(T t) {
+ assert c != null;
+
+ return !c.contains(t);
+ }
+ };
+ }
+
+ /**
+ * Gets predicate (not peer-deployable) that evaluates to {@code true} if its free variable is equal
+ * to {@code target} or both are {@code null}.
+ *
+ * @param target Object to compare free variable to.
+ * @param <T> Type of the free variable, i.e. the element the predicate is called on.
+ * @return Predicate that evaluates to {@code true} if its free variable is equal to
+ * {@code target} or both are {@code null}.
+ */
+ public static <T> IgnitePredicate<T> equalTo(@Nullable final T target) {
+ return new P1<T>() {
+ @Override public boolean apply(T t) {
+ return F.eq(t, target);
+ }
+ };
+ }
+
+ /**
* @param p1 Filter1.
* @param p2 Filter2.
* @return And filter.
@@ -56,12 +133,16 @@ public class F0 {
if (e1 && e2)
return CU.alwaysTrue0();
- if (e1) {
+ if (e1 && !e2) {
+ assert p2 != null;
+
if (p2.length == 1)
return p2[0];
}
- if (e2) {
+ if (!e1 && e2) {
+ assert p1 != null;
+
if (p1.length == 1)
return p1[0];
}
@@ -69,12 +150,16 @@ public class F0 {
return new CacheEntrySerializablePredicate(new CacheEntryPredicateAdapter() {
@Override public boolean apply(GridCacheEntryEx e) {
if (!e1) {
+ assert p1 != null;
+
for (CacheEntryPredicate p : p1)
if (p != null && !p.apply(e))
return false;
}
if (!e2) {
+ assert p2 != null;
+
for (CacheEntryPredicate p : p2)
if (p != null && !p.apply(e))
return false;
@@ -101,11 +186,15 @@ public class F0 {
@Override public void prepareMarshal(GridCacheContext ctx) throws IgniteCheckedException {
if (!e1) {
+ assert p1 != null;
+
for (CacheEntryPredicate p : p1)
p.prepareMarshal(ctx);
}
if (!e2) {
+ assert p2 != null;
+
for (CacheEntryPredicate p : p2)
p.prepareMarshal(ctx);
}
@@ -114,6 +203,63 @@ public class F0 {
}
/**
+ * @param p Filter1.
+ * @param ps Filter2.
+ * @return And filter.
+ */
+ public static CacheEntryPredicate and0(
+ @Nullable final CacheEntryPredicate p,
+ @Nullable final CacheEntryPredicate... ps) {
+ if (p == null && F.isEmptyOrNulls(ps))
+ return CU.alwaysTrue0();
+
+ if (F.isAlwaysFalse(p) && F.isAlwaysFalse(ps))
+ return CU.alwaysFalse0();
+
+ if (F.isAlwaysTrue(p) && F.isAlwaysTrue(ps))
+ return CU.alwaysTrue0();
+
+ return new CacheEntrySerializablePredicate(new CacheEntryPredicateAdapter() {
+ @Override public boolean apply(GridCacheEntryEx e) {
+ assert ps != null;
+
+ if (p != null && !p.apply(e))
+ return false;
+
+ for (CacheEntryPredicate p : ps) {
+ if (p != null && !p.apply(e))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override public void entryLocked(boolean locked) {
+ assert ps != null;
+
+ if (p != null)
+ p.entryLocked(locked);
+
+ for (CacheEntryPredicate p : ps) {
+ if (p != null)
+ p.entryLocked(locked);
+ }
+ }
+
+ @Override public void prepareMarshal(GridCacheContext ctx) throws IgniteCheckedException {
+ assert ps != null;
+
+ if (p != null)
+ p.prepareMarshal(ctx);
+
+ for (CacheEntryPredicate p : ps)
+ if (p != null)
+ p.prepareMarshal(ctx);
+ }
+ });
+ }
+
+ /**
* Get a predicate (non peer-deployable) that evaluates to {@code true} if each of its component predicates
* evaluates to {@code true}. The components are evaluated in order they are supplied.
* Evaluation will be stopped as soon as first predicate evaluates to {@code false}.
@@ -141,12 +287,16 @@ public class F0 {
if (e1 && e2)
return F.alwaysTrue();
- if (e1) {
+ if (e1 && !e2) {
+ assert p2 != null;
+
if (p2.length == 1)
return (IgnitePredicate<T>)p2[0];
}
- if (e2) {
+ if (!e1 && e2) {
+ assert p1 != null;
+
if (p1.length == 1)
return (IgnitePredicate<T>)p1[0];
}
@@ -155,11 +305,15 @@ public class F0 {
Set<UUID> ids = new GridLeanSet<>();
if (!e1) {
+ assert p1 != null;
+
for (IgnitePredicate<? super T> p : p1)
ids.addAll(((GridNodePredicate)p).nodeIds());
}
if (!e2) {
+ assert p2 != null;
+
for (IgnitePredicate<? super T> p : p2)
ids.addAll(((GridNodePredicate)p).nodeIds());
}
@@ -171,12 +325,16 @@ public class F0 {
return new P1<T>() {
@Override public boolean apply(T t) {
if (!e1) {
+ assert p1 != null;
+
for (IgnitePredicate<? super T> p : p1)
if (p != null && !p.apply(t))
return false;
}
if (!e2) {
+ assert p2 != null;
+
for (IgnitePredicate<? super T> p : p2)
if (p != null && !p.apply(t))
return false;
@@ -189,6 +347,148 @@ public class F0 {
}
/**
+ * Get a predicate (not peer-deployable) that evaluates to {@code true} if each of its component predicates
+ * evaluates to {@code true}. The components are evaluated in order they are supplied.
+ * Evaluation will be stopped as soon as first predicate evaluates to {@code false}.
+ * Passed in predicates are NOT copied. If no predicates are passed in the returned
+ * predicate will always evaluate to {@code true}.
+ *
+ * @param ps Passed in predicate. If none provided - always-{@code true} predicate is
+ * returned.
+ * @param <T> Type of the free variable, i.e. the element the predicate is called on.
+ * @return Predicate that evaluates to {@code true} if each of its component predicates
+ * evaluates to {@code true}.
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> IgnitePredicate<T> and(
+ @Nullable final IgnitePredicate<? super T> p,
+ @Nullable final IgnitePredicate<? super T>... ps
+ ) {
+ if (p == null && F.isEmptyOrNulls(ps))
+ return F.alwaysTrue();
+
+ if (F.isAlwaysFalse(p) && F.isAlwaysFalse(ps))
+ return F.alwaysFalse();
+
+ if (F.isAlwaysTrue(p) && F.isAlwaysTrue(ps))
+ return F.alwaysTrue();
+
+ if (isAllNodePredicates(p) && isAllNodePredicates(ps)) {
+ assert ps != null;
+
+ Set<UUID> ids = new GridLeanSet<>();
+
+ for (IgnitePredicate<? super T> p0 : ps) {
+ Collection<UUID> list = ((GridNodePredicate)p0).nodeIds();
+
+ if (ids.isEmpty())
+ ids.addAll(list);
+ else
+ ids.retainAll(list);
+ }
+
+ Collection<UUID> list = ((GridNodePredicate)p).nodeIds();
+
+ if (ids.isEmpty())
+ ids.addAll(list);
+ else
+ ids.retainAll(list);
+
+ // T must be <T extends GridNode>.
+ return (IgnitePredicate<T>)new GridNodePredicate(ids);
+ }
+ else {
+ return new P1<T>() {
+ @Override public boolean apply(T t) {
+ assert ps != null;
+
+ if (p != null && !p.apply(t))
+ return false;
+
+ for (IgnitePredicate<? super T> p : ps)
+ if (p != null && !p.apply(t))
+ return false;
+
+ return true;
+ }
+ };
+ }
+ }
+
+ /**
+ * Gets predicate (not peer-deployable) that returns {@code true} if its free variable is contained
+ * in given collection.
+ *
+ * @param c Collection to check for containment.
+ * @param <T> Type of the free variable for the predicate and type of the
+ * collection elements.
+ * @return Predicate (not peer-deployable) that returns {@code true} if its free variable is
+ * contained in given collection.
+ */
+ public static <T> IgnitePredicate<T> in(@Nullable final Collection<? extends T> c) {
+ return F.isEmpty(c) ? GridFunc.<T>alwaysFalse() : new P1<T>() {
+ @Override public boolean apply(T t) {
+ assert c != null;
+
+ return c.contains(t);
+ }
+ };
+ }
+
+ /**
+ * Provides predicate (not peer-deployable) which returns {@code true} if it receives an element
+ * that is contained in the passed in collection.
+ *
+ * @param c Collection used for predicate filter.
+ * @param <T> Element type.
+ * @return Predicate which returns {@code true} if it receives an element
+ * that is contained in the passed in collection.
+ */
+ public static <T> IgnitePredicate<T> contains(@Nullable final Collection<T> c) {
+ return c == null || c.isEmpty() ? GridFunc.<T>alwaysFalse() : new P1<T>() {
+ @Override public boolean apply(T t) {
+ return c.contains(t);
+ }
+ };
+ }
+
+ /**
+ * Provides predicate (not peer-deployable) which returns {@code true} if it receives an element
+ * that is not contained in the passed in collection.
+ *
+ * @param c Collection used for predicate filter.
+ * @param <T> Element type.
+ * @return Predicate which returns {@code true} if it receives an element
+ * that is not contained in the passed in collection.
+ */
+ public static <T> IgnitePredicate<T> notContains(@Nullable final Collection<T> c) {
+ return c == null || c.isEmpty() ? GridFunc.<T>alwaysTrue() : new P1<T>() {
+ @Override public boolean apply(T t) {
+ return !c.contains(t);
+ }
+ };
+ }
+
+ /**
+ * Tests if all passed in predicates are instances of {@link GridNodePredicate} class.
+ *
+ * @param ps Collection of predicates to test.
+ * @return {@code True} if all passed in predicates are instances of {@link GridNodePredicate} class.
+ */
+ public static boolean isAllNodePredicates(@Nullable Iterable<? extends IgnitePredicate<?>> ps) {
+ if (F.isEmpty(ps))
+ return false;
+
+ assert ps != null;
+
+ for (IgnitePredicate<?> p : ps)
+ if (!(p instanceof GridNodePredicate))
+ return false;
+
+ return true;
+ }
+
+ /**
* Tests if all passed in predicates are instances of {@link GridNodePredicate} class.
*
* @param ps Collection of predicates to test.
@@ -198,10 +498,27 @@ public class F0 {
if (F.isEmpty(ps))
return false;
+ assert ps != null;
+
for (IgnitePredicate<?> p : ps)
if (!(p instanceof GridNodePredicate))
return false;
return true;
}
+
+ /**
+ * Creates map with given values, adding a strict not-null check for value.
+ *
+ * @param key Key.
+ * @param val Value.
+ * @param <K> Key's type.
+ * @param <V> Value's type.
+ * @return Created map.
+ */
+ public static <K, V> Map<K, V> asMap(K key, V val) {
+ A.notNull(val, "val");
+
+ return F.asMap(key, val);
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/GridExecutionStatistics.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/GridExecutionStatistics.java b/modules/core/src/main/java/org/apache/ignite/internal/util/GridExecutionStatistics.java
index fc2e3a2..a23fe28 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/GridExecutionStatistics.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/GridExecutionStatistics.java
@@ -71,13 +71,13 @@ public class GridExecutionStatistics {
long time = startTime.get().get2();
- AtomicInteger cnt = F.addIfAbsent(cntMap, name, new AtomicInteger());
+ AtomicInteger cnt = F.addIfAbsent(cntMap, name, F.newAtomicInt());
assert cnt != null;
cnt.incrementAndGet();
- AtomicLong d = F.addIfAbsent(durationMap, name, new AtomicLong());
+ AtomicLong d = F.addIfAbsent(durationMap, name, F.newAtomicLong());
assert d != null;
http://git-wip-us.apache.org/repos/asf/ignite/blob/ddbe2d59/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
index a92ccab..480859d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
@@ -8508,7 +8508,7 @@ public abstract class IgniteUtils {
throw new IgniteCheckedException("Addresses can not be resolved [addr=" + addrs +
", hostNames=" + hostNames + ']');
- return Collections.unmodifiableList(res);
+ return F.viewListReadOnly(res, F.<InetAddress>identity());
}
/**
@@ -8555,7 +8555,7 @@ public abstract class IgniteUtils {
res.add(new InetSocketAddress(addr, port));
}
- return Collections.unmodifiableList(res);
+ return F.viewListReadOnly(res, F.<InetSocketAddress>identity());
}
/**
[06/11] ignite git commit: IGNITE-2308: Fixed HadoopClassLoader
dependency resolution. This closes #391.
Posted by vo...@apache.org.
IGNITE-2308: Fixed HadoopClassLoader dependency resolution. This closes #391.
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/012ca730
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/012ca730
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/012ca730
Branch: refs/heads/ignite-2314
Commit: 012ca7308bf125d2c90d68ef7a8bc75aeb84bf53
Parents: ddbe2d5
Author: iveselovskiy <iv...@gridgain.com>
Authored: Mon Jan 4 10:47:28 2016 +0400
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Mon Jan 4 10:47:28 2016 +0400
----------------------------------------------------------------------
.../processors/hadoop/HadoopClassLoader.java | 636 ++++++++++++++-----
.../hadoop/HadoopClassLoaderTest.java | 101 ++-
.../hadoop/deps/CircularWIthHadoop.java | 32 +
.../hadoop/deps/CircularWithoutHadoop.java | 27 +
.../processors/hadoop/deps/WithCast.java | 41 ++
.../hadoop/deps/WithClassAnnotation.java | 28 +
.../hadoop/deps/WithConstructorInvocation.java | 31 +
.../processors/hadoop/deps/WithExtends.java | 27 +
.../processors/hadoop/deps/WithField.java | 29 +
.../processors/hadoop/deps/WithImplements.java | 36 ++
.../hadoop/deps/WithIndirectField.java | 27 +
.../processors/hadoop/deps/WithInitializer.java | 33 +
.../processors/hadoop/deps/WithInnerClass.java | 31 +
.../hadoop/deps/WithLocalVariable.java | 38 ++
.../hadoop/deps/WithMethodAnnotation.java | 32 +
.../hadoop/deps/WithMethodArgument.java | 31 +
.../hadoop/deps/WithMethodCheckedException.java | 31 +
.../hadoop/deps/WithMethodInvocation.java | 31 +
.../hadoop/deps/WithMethodReturnType.java | 31 +
.../hadoop/deps/WithMethodRuntimeException.java | 31 +
.../processors/hadoop/deps/WithOuterClass.java | 38 ++
.../hadoop/deps/WithParameterAnnotation.java | 31 +
.../processors/hadoop/deps/WithStaticField.java | 29 +
.../hadoop/deps/WithStaticInitializer.java | 34 +
.../processors/hadoop/deps/Without.java | 25 +
.../testsuites/IgniteHadoopTestSuite.java | 3 +
26 files changed, 1279 insertions(+), 185 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
index f12af46..735133f 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java
@@ -30,7 +30,7 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
+
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.hadoop.v2.HadoopDaemon;
import org.apache.ignite.internal.processors.hadoop.v2.HadoopNativeCodeLoader;
@@ -40,13 +40,16 @@ import org.apache.ignite.internal.util.typedef.internal.S;
import org.jetbrains.annotations.Nullable;
import org.jsr166.ConcurrentHashMap8;
import org.objectweb.asm.AnnotationVisitor;
+import org.objectweb.asm.Attribute;
import org.objectweb.asm.ClassReader;
import org.objectweb.asm.ClassVisitor;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.FieldVisitor;
+import org.objectweb.asm.Handle;
import org.objectweb.asm.Label;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
import org.objectweb.asm.commons.Remapper;
import org.objectweb.asm.commons.RemappingClassAdapter;
@@ -125,10 +128,14 @@ public class HadoopClassLoader extends URLClassLoader {
* @return {@code true} if we need to check this class.
*/
private static boolean isHadoopIgfs(String cls) {
- String ignitePackagePrefix = "org.apache.ignite";
- int len = ignitePackagePrefix.length();
+ String ignitePkgPrefix = "org.apache.ignite";
+
+ int len = ignitePkgPrefix.length();
- return cls.startsWith(ignitePackagePrefix) && (cls.indexOf("igfs.", len) != -1 || cls.indexOf(".fs.", len) != -1 || cls.indexOf("hadoop.", len) != -1);
+ return cls.startsWith(ignitePkgPrefix) && (
+ cls.indexOf("igfs.", len) != -1 ||
+ cls.indexOf(".fs.", len) != -1 ||
+ cls.indexOf("hadoop.", len) != -1);
}
/**
@@ -159,7 +166,7 @@ public class HadoopClassLoader extends URLClassLoader {
Boolean hasDeps = cache.get(name);
if (hasDeps == null) {
- hasDeps = hasExternalDependencies(name, new HashSet<String>());
+ hasDeps = hasExternalDependencies(name);
cache.put(name, hasDeps);
}
@@ -266,10 +273,30 @@ public class HadoopClassLoader extends URLClassLoader {
}
/**
+ * Check whether class has external dependencies on Hadoop.
+ *
* @param clsName Class name.
+ * @return {@code True} if class has external dependencies.
+ */
+ boolean hasExternalDependencies(String clsName) {
+ CollectingContext ctx = new CollectingContext();
+
+ ctx.annVisitor = new CollectingAnnotationVisitor(ctx);
+ ctx.mthdVisitor = new CollectingMethodVisitor(ctx, ctx.annVisitor);
+ ctx.fldVisitor = new CollectingFieldVisitor(ctx, ctx.annVisitor);
+ ctx.clsVisitor = new CollectingClassVisitor(ctx, ctx.annVisitor, ctx.mthdVisitor, ctx.fldVisitor);
+
+ return hasExternalDependencies(clsName, ctx);
+ }
+
+ /**
+ * Check whether class has external dependencies on Hadoop.
+ *
+ * @param clsName Class name.
+ * @param ctx Context.
* @return {@code true} If the class has external dependencies.
*/
- boolean hasExternalDependencies(final String clsName, final Set<String> visited) {
+ boolean hasExternalDependencies(String clsName, CollectingContext ctx) {
if (isHadoop(clsName)) // Hadoop must not be in classpath but Idea sucks, so filtering explicitly as external.
return true;
@@ -291,157 +318,14 @@ public class HadoopClassLoader extends URLClassLoader {
throw new RuntimeException("Failed to read class: " + clsName, e);
}
- visited.add(clsName);
-
- final AtomicBoolean hasDeps = new AtomicBoolean();
-
- rdr.accept(new ClassVisitor(Opcodes.ASM4) {
- AnnotationVisitor av = new AnnotationVisitor(Opcodes.ASM4) {
- // TODO
- };
-
- FieldVisitor fv = new FieldVisitor(Opcodes.ASM4) {
- @Override public AnnotationVisitor visitAnnotation(String desc, boolean b) {
- onType(desc);
-
- return av;
- }
- };
-
- MethodVisitor mv = new MethodVisitor(Opcodes.ASM4) {
- @Override public AnnotationVisitor visitAnnotation(String desc, boolean b) {
- onType(desc);
-
- return av;
- }
-
- @Override public AnnotationVisitor visitParameterAnnotation(int i, String desc, boolean b) {
- onType(desc);
-
- return av;
- }
-
- @Override public AnnotationVisitor visitAnnotationDefault() {
- return av;
- }
-
- @Override public void visitFieldInsn(int i, String owner, String name, String desc) {
- onType(owner);
- onType(desc);
- }
-
- @Override public void visitFrame(int i, int i2, Object[] locTypes, int i3, Object[] stackTypes) {
- for (Object o : locTypes) {
- if (o instanceof String)
- onType((String)o);
- }
-
- for (Object o : stackTypes) {
- if (o instanceof String)
- onType((String)o);
- }
- }
-
- @Override public void visitLocalVariable(String name, String desc, String signature, Label lb,
- Label lb2, int i) {
- onType(desc);
- }
-
- @Override public void visitMethodInsn(int i, String owner, String name, String desc) {
- onType(owner);
- }
-
- @Override public void visitMultiANewArrayInsn(String desc, int dim) {
- onType(desc);
- }
-
- @Override public void visitTryCatchBlock(Label lb, Label lb2, Label lb3, String e) {
- onType(e);
- }
- };
-
- void onClass(String depCls) {
- assert validateClassName(depCls) : depCls;
-
- if (depCls.startsWith("java.")) // Filter out platform classes.
- return;
-
- if (visited.contains(depCls))
- return;
-
- Boolean res = cache.get(depCls);
-
- if (res == Boolean.TRUE || (res == null && hasExternalDependencies(depCls, visited)))
- hasDeps.set(true);
- }
-
- void onType(String type) {
- if (type == null)
- return;
-
- int off = 0;
-
- while (type.charAt(off) == '[')
- off++; // Handle arrays.
-
- if (off != 0)
- type = type.substring(off);
-
- if (type.length() == 1)
- return; // Get rid of primitives.
-
- if (type.charAt(type.length() - 1) == ';') {
- assert type.charAt(0) == 'L' : type;
-
- type = type.substring(1, type.length() - 1);
- }
-
- type = type.replace('/', '.');
-
- onClass(type);
- }
-
- @Override public void visit(int i, int i2, String name, String signature, String superName,
- String[] ifaces) {
- onType(superName);
-
- if (ifaces != null) {
- for (String iface : ifaces)
- onType(iface);
- }
- }
-
- @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
- onType(desc);
-
- return av;
- }
-
- @Override public void visitInnerClass(String name, String outerName, String innerName, int i) {
- onType(name);
- }
-
- @Override public FieldVisitor visitField(int i, String name, String desc, String signature, Object val) {
- onType(desc);
+ ctx.visited.add(clsName);
- return fv;
- }
+ rdr.accept(ctx.clsVisitor, 0);
- @Override public MethodVisitor visitMethod(int i, String name, String desc, String signature,
- String[] exceptions) {
- if (exceptions != null) {
- for (String e : exceptions)
- onType(e);
- }
-
- return mv;
- }
- }, 0);
-
- if (hasDeps.get()) // We already know that we have dependencies, no need to check parent.
+ if (ctx.found) // We already know that we have dependencies, no need to check parent.
return true;
- // Here we are known to not have any dependencies but possibly we have a parent which have them.
+ // Here we are known to not have any dependencies but possibly we have a parent which has them.
int idx = clsName.lastIndexOf('$');
if (idx == -1) // No parent class.
@@ -449,13 +333,13 @@ public class HadoopClassLoader extends URLClassLoader {
String parentCls = clsName.substring(0, idx);
- if (visited.contains(parentCls))
+ if (ctx.visited.contains(parentCls))
return false;
Boolean res = cache.get(parentCls);
if (res == null)
- res = hasExternalDependencies(parentCls, visited);
+ res = hasExternalDependencies(parentCls, ctx);
return res;
}
@@ -616,4 +500,446 @@ public class HadoopClassLoader extends URLClassLoader {
public String name() {
return name;
}
+
+ /**
+ * Context for dependencies collection.
+ */
+ private class CollectingContext {
+ /** Visited classes. */
+ private final Set<String> visited = new HashSet<>();
+
+ /** Whether dependency found. */
+ private boolean found;
+
+ /** Annotation visitor. */
+ private AnnotationVisitor annVisitor;
+
+ /** Method visitor. */
+ private MethodVisitor mthdVisitor;
+
+ /** Field visitor. */
+ private FieldVisitor fldVisitor;
+
+ /** Class visitor. */
+ private ClassVisitor clsVisitor;
+
+ /**
+ * Processes a method descriptor
+ * @param methDesc The method desc String.
+ */
+ void onMethodsDesc(final String methDesc) {
+ // Process method return type:
+ onType(Type.getReturnType(methDesc));
+
+ if (found)
+ return;
+
+ // Process method argument types:
+ for (Type t: Type.getArgumentTypes(methDesc)) {
+ onType(t);
+
+ if (found)
+ return;
+ }
+ }
+
+ /**
+ * Processes dependencies of a class.
+ *
+ * @param depCls The class name as dot-notated FQN.
+ */
+ void onClass(final String depCls) {
+ assert depCls.indexOf('/') == -1 : depCls; // class name should be fully converted to dot notation.
+ assert depCls.charAt(0) != 'L' : depCls;
+ assert validateClassName(depCls) : depCls;
+
+ if (depCls.startsWith("java.") || depCls.startsWith("javax.")) // Filter out platform classes.
+ return;
+
+ if (visited.contains(depCls))
+ return;
+
+ Boolean res = cache.get(depCls);
+
+ if (res == Boolean.TRUE || (res == null && hasExternalDependencies(depCls, this)))
+ found = true;
+ }
+
+ /**
+ * Analyses dependencies of given type.
+ *
+ * @param t The type to process.
+ */
+ void onType(Type t) {
+ if (t == null)
+ return;
+
+ int sort = t.getSort();
+
+ switch (sort) {
+ case Type.ARRAY:
+ onType(t.getElementType());
+
+ break;
+
+ case Type.OBJECT:
+ onClass(t.getClassName());
+
+ break;
+ }
+ }
+
+ /**
+ * Analyses dependencies of given object type.
+ *
+ * @param objType The object type to process.
+ */
+ void onInternalTypeName(String objType) {
+ if (objType == null)
+ return;
+
+ assert objType.length() > 1 : objType;
+
+ if (objType.charAt(0) == '[')
+ // handle array. In this case this is a type descriptor notation, like "[Ljava/lang/Object;"
+ onType(objType);
+ else {
+ assert objType.indexOf('.') == -1 : objType; // Must be slash-separated FQN.
+
+ String clsName = objType.replace('/', '.'); // Convert it to dot notation.
+
+ onClass(clsName); // Process.
+ }
+ }
+
+ /**
+ * Type description analyser.
+ *
+ * @param desc The description.
+ */
+ void onType(String desc) {
+ if (!F.isEmpty(desc)) {
+ if (desc.length() <= 1)
+ return; // Optimization: filter out primitive types in early stage.
+
+ Type t = Type.getType(desc);
+
+ onType(t);
+ }
+ }
+ }
+
+ /**
+ * Annotation visitor.
+ */
+ private static class CollectingAnnotationVisitor extends AnnotationVisitor {
+ /** */
+ final CollectingContext ctx;
+
+ /**
+ * Annotation visitor.
+ *
+ * @param ctx The collector.
+ */
+ CollectingAnnotationVisitor(CollectingContext ctx) {
+ super(Opcodes.ASM4);
+
+ this.ctx = ctx;
+ }
+
+ /** {@inheritDoc} */
+ @Override public AnnotationVisitor visitAnnotation(String name, String desc) {
+ if (ctx.found)
+ return null;
+
+ ctx.onType(desc);
+
+ return this;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitEnum(String name, String desc, String val) {
+ if (ctx.found)
+ return;
+
+ ctx.onType(desc);
+ }
+
+ /** {@inheritDoc} */
+ @Override public AnnotationVisitor visitArray(String name) {
+ return ctx.found ? null : this;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visit(String name, Object val) {
+ if (ctx.found)
+ return;
+
+ if (val instanceof Type)
+ ctx.onType((Type)val);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitEnd() {
+ // No-op.
+ }
+ }
+
+ /**
+ * Field visitor.
+ */
+ private static class CollectingFieldVisitor extends FieldVisitor {
+ /** Collector. */
+ private final CollectingContext ctx;
+
+ /** Annotation visitor. */
+ private final AnnotationVisitor av;
+
+ /**
+ * Constructor.
+ */
+ CollectingFieldVisitor(CollectingContext ctx, AnnotationVisitor av) {
+ super(Opcodes.ASM4);
+
+ this.ctx = ctx;
+ this.av = av;
+ }
+
+ /** {@inheritDoc} */
+ @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+ if (ctx.found)
+ return null;
+
+ ctx.onType(desc);
+
+ return ctx.found ? null : av;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitAttribute(Attribute attr) {
+ // No-op.
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitEnd() {
+ // No-op.
+ }
+ }
+
+ /**
+ * Class visitor.
+ */
+ private static class CollectingClassVisitor extends ClassVisitor {
+ /** Collector. */
+ private final CollectingContext ctx;
+
+ /** Annotation visitor. */
+ private final AnnotationVisitor av;
+
+ /** Method visitor. */
+ private final MethodVisitor mv;
+
+ /** Field visitor. */
+ private final FieldVisitor fv;
+
+ /**
+ * Constructor.
+ *
+ * @param ctx Collector.
+ * @param av Annotation visitor.
+ * @param mv Method visitor.
+ * @param fv Field visitor.
+ */
+ CollectingClassVisitor(CollectingContext ctx, AnnotationVisitor av, MethodVisitor mv, FieldVisitor fv) {
+ super(Opcodes.ASM4);
+
+ this.ctx = ctx;
+ this.av = av;
+ this.mv = mv;
+ this.fv = fv;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visit(int i, int i2, String name, String signature, String superName, String[] ifaces) {
+ if (ctx.found)
+ return;
+
+ ctx.onInternalTypeName(superName);
+
+ if (ctx.found)
+ return;
+
+ if (ifaces != null) {
+ for (String iface : ifaces) {
+ ctx.onInternalTypeName(iface);
+
+ if (ctx.found)
+ return;
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+ if (ctx.found)
+ return null;
+
+ ctx.onType(desc);
+
+ return ctx.found ? null : av;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitInnerClass(String name, String outerName, String innerName, int i) {
+ if (ctx.found)
+ return;
+
+ ctx.onInternalTypeName(name);
+ }
+
+ /** {@inheritDoc} */
+ @Override public FieldVisitor visitField(int i, String name, String desc, String signature, Object val) {
+ if (ctx.found)
+ return null;
+
+ ctx.onType(desc);
+
+ return ctx.found ? null : fv;
+ }
+
+ /** {@inheritDoc} */
+ @Override public MethodVisitor visitMethod(int i, String name, String desc, String signature,
+ String[] exceptions) {
+ if (ctx.found)
+ return null;
+
+ ctx.onMethodsDesc(desc);
+
+ // Process declared method exceptions:
+ if (exceptions != null) {
+ for (String e : exceptions)
+ ctx.onInternalTypeName(e);
+ }
+
+ return ctx.found ? null : mv;
+ }
+ }
+
+ /**
+ * Method visitor.
+ */
+ private static class CollectingMethodVisitor extends MethodVisitor {
+ /** Collector. */
+ private final CollectingContext ctx;
+
+ /** Annotation visitor. */
+ private final AnnotationVisitor av;
+
+ /**
+ * Constructor.
+ *
+ * @param ctx Collector.
+ * @param av Annotation visitor.
+ */
+ private CollectingMethodVisitor(CollectingContext ctx, AnnotationVisitor av) {
+ super(Opcodes.ASM4);
+
+ this.ctx = ctx;
+ this.av = av;
+ }
+
+ /** {@inheritDoc} */
+ @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+ if (ctx.found)
+ return null;
+
+ ctx.onType(desc);
+
+ return ctx.found ? null : av;
+ }
+
+ /** {@inheritDoc} */
+ @Override public AnnotationVisitor visitParameterAnnotation(int i, String desc, boolean b) {
+ if (ctx.found)
+ return null;
+
+ ctx.onType(desc);
+
+ return ctx.found ? null : av;
+ }
+
+ /** {@inheritDoc} */
+ @Override public AnnotationVisitor visitAnnotationDefault() {
+ return ctx.found ? null : av;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitFieldInsn(int opcode, String owner, String name, String desc) {
+ if (ctx.found)
+ return;
+
+ ctx.onInternalTypeName(owner);
+
+ if (ctx.found)
+ return;
+
+ ctx.onType(desc);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitInvokeDynamicInsn(String name, String desc, Handle bsm, Object... bsmArgs) {
+ // No-op.
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitFrame(int type, int nLoc, Object[] locTypes, int nStack, Object[] stackTypes) {
+ // No-op.
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitLocalVariable(String name, String desc, String signature, Label lb,
+ Label lb2, int i) {
+ if (ctx.found)
+ return;
+
+ ctx.onType(desc);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitMethodInsn(int i, String owner, String name, String desc) {
+ if (ctx.found)
+ return;
+
+ ctx.onInternalTypeName(owner);
+
+ if (ctx.found)
+ return;
+
+ ctx.onMethodsDesc(desc);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitMultiANewArrayInsn(String desc, int dim) {
+ if (ctx.found)
+ return;
+
+ ctx.onType(desc);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitTryCatchBlock(Label start, Label end, Label hndl, String typeStr) {
+ if (ctx.found)
+ return;
+
+ ctx.onInternalTypeName(typeStr);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void visitTypeInsn(int opcode, String type) {
+ if (ctx.found)
+ return;
+
+ ctx.onInternalTypeName(type);
+ }
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
index 085dd45..55fac2c 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
@@ -17,53 +17,94 @@
package org.apache.ignite.internal.processors.hadoop;
+import javax.security.auth.AuthPermission;
import junit.framework.TestCase;
-import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.ignite.internal.processors.hadoop.deps.CircularWIthHadoop;
+import org.apache.ignite.internal.processors.hadoop.deps.CircularWithoutHadoop;
+import org.apache.ignite.internal.processors.hadoop.deps.WithIndirectField;
+import org.apache.ignite.internal.processors.hadoop.deps.WithCast;
+import org.apache.ignite.internal.processors.hadoop.deps.WithClassAnnotation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithConstructorInvocation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodCheckedException;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodRuntimeException;
+import org.apache.ignite.internal.processors.hadoop.deps.WithExtends;
+import org.apache.ignite.internal.processors.hadoop.deps.WithField;
+import org.apache.ignite.internal.processors.hadoop.deps.WithImplements;
+import org.apache.ignite.internal.processors.hadoop.deps.WithInitializer;
+import org.apache.ignite.internal.processors.hadoop.deps.WithInnerClass;
+import org.apache.ignite.internal.processors.hadoop.deps.WithLocalVariable;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodAnnotation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodInvocation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodArgument;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodReturnType;
+import org.apache.ignite.internal.processors.hadoop.deps.WithOuterClass;
+import org.apache.ignite.internal.processors.hadoop.deps.WithParameterAnnotation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithStaticField;
+import org.apache.ignite.internal.processors.hadoop.deps.WithStaticInitializer;
+import org.apache.ignite.internal.processors.hadoop.deps.Without;
/**
- *
+ * Tests for Hadoop classloader.
*/
public class HadoopClassLoaderTest extends TestCase {
/** */
- HadoopClassLoader ldr = new HadoopClassLoader(null, "test");
+ final HadoopClassLoader ldr = new HadoopClassLoader(null, "test");
/**
* @throws Exception If failed.
*/
public void testClassLoading() throws Exception {
- assertNotSame(Test1.class, ldr.loadClass(Test1.class.getName()));
- assertNotSame(Test2.class, ldr.loadClass(Test2.class.getName()));
- assertSame(Test3.class, ldr.loadClass(Test3.class.getName()));
- }
+ assertNotSame(CircularWIthHadoop.class, ldr.loadClass(CircularWIthHadoop.class.getName()));
+ assertNotSame(CircularWithoutHadoop.class, ldr.loadClass(CircularWithoutHadoop.class.getName()));
-// public void testDependencySearch() {
-// assertTrue(ldr.hasExternalDependencies(Test1.class.getName(), new HashSet<String>()));
-// assertTrue(ldr.hasExternalDependencies(Test2.class.getName(), new HashSet<String>()));
-// }
+ assertSame(Without.class, ldr.loadClass(Without.class.getName()));
+ }
/**
- *
+ * Test dependency search.
*/
- private static class Test1 {
- /** */
- Test2 t2;
+ public void testDependencySearch() {
+ // Positive cases:
+ final Class[] positiveClasses = {
+ Configuration.class,
+ HadoopUtils.class,
+ WithStaticField.class,
+ WithCast.class,
+ WithClassAnnotation.class,
+ WithConstructorInvocation.class,
+ WithMethodCheckedException.class,
+ WithMethodRuntimeException.class,
+ WithExtends.class,
+ WithField.class,
+ WithImplements.class,
+ WithInitializer.class,
+ WithInnerClass.class,
+ WithOuterClass.InnerNoHadoop.class,
+ WithLocalVariable.class,
+ WithMethodAnnotation.class,
+ WithMethodInvocation.class,
+ WithMethodArgument.class,
+ WithMethodReturnType.class,
+ WithParameterAnnotation.class,
+ WithStaticField.class,
+ WithStaticInitializer.class,
+ WithIndirectField.class,
+ CircularWIthHadoop.class,
+ CircularWithoutHadoop.class,
+ };
- /** */
- Job[][] jobs = new Job[4][4];
- }
+ for (Class c: positiveClasses)
+ assertTrue(c.getName(), ldr.hasExternalDependencies(c.getName()));
- /**
- *
- */
- private static abstract class Test2 {
- /** */
- abstract Test1 t1();
- }
+ // Negative cases:
+ final Class[] negativeClasses = {
+ Object.class,
+ AuthPermission.class,
+ Without.class,
+ };
- /**
- *
- */
- private static class Test3 {
- // No-op.
+ for (Class c: negativeClasses)
+ assertFalse(c.getName(), ldr.hasExternalDependencies(c.getName()));
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java
new file mode 100644
index 0000000..c3aa7d9
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.mapreduce.Job;
+
+/**
+ * Class has a direct Hadoop dependency and a circular dependency on another class.
+ */
+@SuppressWarnings("unused")
+public class CircularWIthHadoop {
+ /** */
+ private Job[][] jobs = new Job[4][4];
+
+ /** */
+ private CircularWithoutHadoop y;
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java
new file mode 100644
index 0000000..93d659c
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Does not have a direct Hadoop dependency, but has a circular dependency on a Hadoop-dependent class.
+ */
+@SuppressWarnings("unused")
+public class CircularWithoutHadoop {
+ /** */
+ private CircularWIthHadoop x;
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java
new file mode 100644
index 0000000..5b1e8e0
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Class contains casting to a Hadoop type.
+ */
+@SuppressWarnings("unused")
+public abstract class WithCast<T> {
+ /** */
+ public abstract T create();
+
+ /** */
+ public void consume(T t) {
+ // noop
+ }
+
+ /** */
+ void test(WithCast<FileSystem> c) {
+ FileSystem fs = c.create();
+
+ c.consume(fs);
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java
new file mode 100644
index 0000000..a9ecae0
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Class has Hadoop annotation.
+ */
+@SuppressWarnings("unused")
+@InterfaceAudience.Public
+public class WithClassAnnotation {
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java
new file mode 100644
index 0000000..98c8991
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Invokes a Hadoop type constructor.
+ */
+@SuppressWarnings("unused")
+public class WithConstructorInvocation {
+ /** */
+ private void foo() {
+ Object x = new Configuration();
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java
new file mode 100644
index 0000000..80c99e1
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.LocalFileSystem;
+
+/**
+ * Class extends a Hadoop class.
+ */
+public class WithExtends extends LocalFileSystem {
+ // noop
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java
new file mode 100644
index 0000000..dd979db
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Has a Hadoop field.
+ */
+@SuppressWarnings("unused")
+public class WithField {
+ /** */
+ private Configuration conf;
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java
new file mode 100644
index 0000000..c2d8e5b
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Implements a Hadoop interface.
+ */
+public class WithImplements implements Configurable {
+ /** {@inheritDoc} */
+ @Override public void setConf(Configuration conf) {
+ // noop
+ }
+
+ /** {@inheritDoc} */
+ @Override public Configuration getConf() {
+ return null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java
new file mode 100644
index 0000000..ce078f1
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Has a unidirectional dependency on a Hadoop-dependent class.
+ */
+@SuppressWarnings("unused")
+public class WithIndirectField {
+ /** */
+ WithField x;
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java
new file mode 100644
index 0000000..360986c
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Has a field initialized with an expression invoking Hadoop method.
+ */
+
+@SuppressWarnings({"ConstantConditions", "unused"})
+public class WithInitializer {
+ /** */
+ private final Object x = org.apache.hadoop.fs.FileSystem.getDefaultUri(null);
+
+ /** */
+ WithInitializer() throws Exception {
+ // noop
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java
new file mode 100644
index 0000000..4a5a49c
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configurable;
+
+/**
+ * Has a *static* inner class depending on Hadoop.
+ */
+@SuppressWarnings("unused")
+public class WithInnerClass {
+ /** */
+ private static abstract class Foo implements Configurable {
+ // No-op.
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java
new file mode 100644
index 0000000..ea4a5de
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Has a local variable of Hadoop type.
+ */
+@SuppressWarnings({"unused", "ConstantConditions"})
+public class WithLocalVariable {
+ /** */
+ void foo() {
+ Configuration c = null;
+
+ moo(c);
+ }
+
+ /** */
+ void moo(Object x) {
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java
new file mode 100644
index 0000000..ff9fbe0
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Method has a Hadoop annotation.
+ */
+@SuppressWarnings("unused")
+public class WithMethodAnnotation {
+ /** */
+ @InterfaceStability.Unstable
+ void foo() {
+ // No-op.
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java
new file mode 100644
index 0000000..7f639e4
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Contains a formal parameter of Hadoop type.
+ */
+@SuppressWarnings("unused")
+public class WithMethodArgument {
+ /** */
+ protected void paramaterMethod(Configuration c) {
+ // No-op.
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java
new file mode 100644
index 0000000..8fd12ae
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.ChecksumException;
+
+/**
+ * Method declares a checked Hadoop Exception.
+ */
+@SuppressWarnings("unused")
+public class WithMethodCheckedException {
+ /** */
+ void foo() throws ChecksumException {
+ // No-op.
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java
new file mode 100644
index 0000000..de8b306
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Method contains a Hadoop type method invocation.
+ */
+@SuppressWarnings("unused")
+public class WithMethodInvocation {
+ /** */
+ void foo(FileSystem fs) {
+ fs.getChildFileSystems();
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java
new file mode 100644
index 0000000..0e0ea72
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Contains a method return value of Hadoop type.
+ */
+@SuppressWarnings("unused")
+public class WithMethodReturnType {
+ /** */
+ FileSystem fsMethod() {
+ return null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java
new file mode 100644
index 0000000..dcd471c
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+
+/**
+ * Method declares a runtime Hadoop Exception.
+ */
+@SuppressWarnings("unused")
+public class WithMethodRuntimeException {
+ /** */
+ void foo() throws HadoopIllegalArgumentException {
+ // No-op.
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java
new file mode 100644
index 0000000..cae1da7
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Outer class depends on Hadoop, but Inner *static* one does not.
+ */
+@SuppressWarnings("unused")
+public class WithOuterClass {
+ /** */
+ Configuration c;
+
+ /** */
+ public static class InnerNoHadoop {
+ /** */
+ int x;
+
+ /** */
+ void foo() {}
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java
new file mode 100644
index 0000000..9d3414e
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Has a parameter annotated with a Hadoop annotation.
+ */
+@SuppressWarnings("unused")
+public class WithParameterAnnotation {
+ /** */
+ void foo(@InterfaceStability.Stable Object annotatedParam) {
+ // No-op.
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java
new file mode 100644
index 0000000..301b912
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Has a static field of Hadoop type.
+ */
+@SuppressWarnings("unused")
+public class WithStaticField {
+ /** */
+ static FileSystem fs;
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java
new file mode 100644
index 0000000..e0fc2f3
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import java.util.List;
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Uses Hadoop type in a static initializer.
+ */
+@SuppressWarnings("unused")
+public class WithStaticInitializer {
+ /** */
+ static final List x;
+
+ static {
+ x = FileSystem.getAllStatistics();
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java
new file mode 100644
index 0000000..ab84740
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Class that does not depend on Hadoop in any way.
+ */
+public class Without {
+ // No-op.
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/012ca730/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
index 6641bc8..1831085 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
@@ -54,6 +54,7 @@ import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackExternalPrimarySelfT
import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest;
import org.apache.ignite.igfs.IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest;
import org.apache.ignite.igfs.IgniteHadoopFileSystemSecondaryModeSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopClassLoaderTest;
import org.apache.ignite.internal.processors.hadoop.HadoopCommandLineTest;
import org.apache.ignite.internal.processors.hadoop.HadoopDefaultMapReducePlannerSelfTest;
import org.apache.ignite.internal.processors.hadoop.HadoopFileSystemsTest;
@@ -95,6 +96,8 @@ public class IgniteHadoopTestSuite extends TestSuite {
TestSuite suite = new TestSuite("Ignite Hadoop MR Test Suite");
+ suite.addTest(new TestSuite(ldr.loadClass(HadoopClassLoaderTest.class.getName())));
+
suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfs20FileSystemLoopbackPrimarySelfTest.class.getName())));
suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfsDualSyncSelfTest.class.getName())));
[08/11] ignite git commit: IGNITE-2206: Hadoop file system creation
is now abstracted out using factory interface.
Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
index 99ca1ec..0d7de86 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
@@ -17,22 +17,6 @@
package org.apache.ignite.hadoop.fs.v2;
-import java.io.BufferedOutputStream;
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -43,6 +27,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.InvalidPathException;
@@ -51,13 +36,14 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
+import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
import org.apache.ignite.igfs.IgfsBlockLocation;
import org.apache.ignite.igfs.IgfsFile;
import org.apache.ignite.igfs.IgfsMode;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.processors.hadoop.SecondaryFileSystemProvider;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsInputStream;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
@@ -74,8 +60,26 @@ import org.apache.ignite.internal.util.typedef.T2;
import org.apache.ignite.internal.util.typedef.internal.A;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleAware;
import org.jetbrains.annotations.Nullable;
+import java.io.BufferedOutputStream;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_BATCH_SIZE;
import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_DIR;
import static org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.getFsHadoopUser;
@@ -92,8 +96,6 @@ import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_GROUP_NAME;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_PERMISSION;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_PREFER_LOCAL_WRITES;
import static org.apache.ignite.internal.processors.igfs.IgfsEx.PROP_USER_NAME;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_CONFIG_PATH;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.SECONDARY_FS_URI;
/**
* {@code IGFS} Hadoop 2.x file system driver over file system API. To use
@@ -168,8 +170,8 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
/** Mode resolver. */
private IgfsModeResolver modeRslvr;
- /** Secondary file system instance. */
- private AbstractFileSystem secondaryFs;
+ /** The secondary file system factory. */
+ private HadoopFileSystemFactory factory;
/** Whether custom sequential reads before prefetch value is provided. */
private boolean seqReadsBeforePrefetchOverride;
@@ -335,20 +337,27 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
}
if (initSecondary) {
- Map<String, String> props = paths.properties();
+ try {
+ factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
+ }
+ catch (IgniteCheckedException e) {
+ throw new IOException("Failed to get secondary file system factory.", e);
+ }
- String secUri = props.get(SECONDARY_FS_URI);
- String secConfPath = props.get(SECONDARY_FS_CONFIG_PATH);
+ assert factory != null;
+
+ if (factory instanceof LifecycleAware)
+ ((LifecycleAware) factory).start();
try {
- SecondaryFileSystemProvider secProvider = new SecondaryFileSystemProvider(secUri, secConfPath);
+ FileSystem secFs = factory.get(user);
- secondaryFs = secProvider.createAbstractFileSystem(user);
+ secondaryUri = secFs.getUri();
- secondaryUri = secProvider.uri();
+ A.ensure(secondaryUri != null, "Secondary file system uri should not be null.");
}
catch (IOException e) {
- throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
+ throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
}
}
}
@@ -368,6 +377,9 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
if (clientLog.isLogEnabled())
clientLog.close();
+ if (factory instanceof LifecycleAware)
+ ((LifecycleAware) factory).stop();
+
// Reset initialized resources.
rmtClient = null;
}
@@ -391,13 +403,13 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
/** {@inheritDoc} */
@Override public boolean setReplication(Path f, short replication) throws IOException {
- return mode(f) == PROXY && secondaryFs.setReplication(f, replication);
+ return mode(f) == PROXY && secondaryFileSystem().setReplication(f, replication);
}
/** {@inheritDoc} */
@Override public void setTimes(Path f, long mtime, long atime) throws IOException {
if (mode(f) == PROXY)
- secondaryFs.setTimes(f, mtime, atime);
+ secondaryFileSystem().setTimes(f, mtime, atime);
else {
if (mtime == -1 && atime == -1)
return;
@@ -421,7 +433,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
A.notNull(p, "p");
if (mode(p) == PROXY)
- secondaryFs.setPermission(toSecondary(p), perm);
+ secondaryFileSystem().setPermission(toSecondary(p), perm);
else {
if (rmtClient.update(convert(p), permission(perm)) == null)
throw new IOException("Failed to set file permission (file not found?)" +
@@ -443,7 +455,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
try {
if (mode(p) == PROXY)
- secondaryFs.setOwner(toSecondary(p), usr, grp);
+ secondaryFileSystem().setOwner(toSecondary(p), usr, grp);
else if (rmtClient.update(convert(p), F.asMap(PROP_USER_NAME, usr, PROP_GROUP_NAME, grp)) == null)
throw new IOException("Failed to set file permission (file not found?)" +
" [path=" + p + ", username=" + usr + ", grpName=" + grp + ']');
@@ -464,11 +476,11 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
IgfsMode mode = modeRslvr.resolveMode(path);
if (mode == PROXY) {
- FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);
+ FSDataInputStream is = secondaryFileSystem().open(toSecondary(f), bufSize);
if (clientLog.isLogEnabled()) {
// At this point we do not know file size, so we perform additional request to remote FS to get it.
- FileStatus status = secondaryFs.getFileStatus(toSecondary(f));
+ FileStatus status = secondaryFileSystem().getFileStatus(toSecondary(f));
long size = status != null ? status.getLen() : -1;
@@ -543,8 +555,8 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
if (mode == PROXY) {
- FSDataOutputStream os = secondaryFs.createInternal(toSecondary(f), flag, perm, bufSize,
- replication, blockSize, progress, checksumOpt, createParent);
+ FSDataOutputStream os = secondaryFileSystem().create(toSecondary(f), perm, flag, bufSize,
+ replication, blockSize, progress);
if (clientLog.isLogEnabled()) {
long logId = IgfsLogger.nextId();
@@ -641,7 +653,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
if (clientLog.isLogEnabled())
clientLog.logRename(srcPath, PROXY, dstPath);
- secondaryFs.renameInternal(toSecondary(src), toSecondary(dst));
+ secondaryFileSystem().rename(toSecondary(src), toSecondary(dst));
}
else {
if (clientLog.isLogEnabled())
@@ -671,7 +683,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
if (clientLog.isLogEnabled())
clientLog.logDelete(path, PROXY, recursive);
- return secondaryFs.delete(toSecondary(f), recursive);
+ return secondaryFileSystem().delete(toSecondary(f), recursive);
}
boolean res = rmtClient.delete(path, recursive);
@@ -689,14 +701,14 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
/** {@inheritDoc} */
@Override public void setVerifyChecksum(boolean verifyChecksum) throws IOException {
// Checksum has effect for secondary FS only.
- if (secondaryFs != null)
- secondaryFs.setVerifyChecksum(verifyChecksum);
+ if (factory != null)
+ secondaryFileSystem().setVerifyChecksum(verifyChecksum);
}
/** {@inheritDoc} */
@Override public FileChecksum getFileChecksum(Path f) throws IOException {
if (mode(f) == PROXY)
- return secondaryFs.getFileChecksum(f);
+ return secondaryFileSystem().getFileChecksum(f);
return null;
}
@@ -712,7 +724,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
IgfsMode mode = modeRslvr.resolveMode(path);
if (mode == PROXY) {
- FileStatus[] arr = secondaryFs.listStatus(toSecondary(f));
+ FileStatus[] arr = secondaryFileSystem().listStatus(toSecondary(f));
if (arr == null)
throw new FileNotFoundException("File " + f + " does not exist.");
@@ -775,7 +787,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
if (clientLog.isLogEnabled())
clientLog.logMakeDirectory(path, PROXY);
- secondaryFs.mkdir(toSecondary(f), perm, createParent);
+ secondaryFileSystem().mkdirs(toSecondary(f), perm);
}
else {
rmtClient.mkdirs(path, permission(perm));
@@ -797,7 +809,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
try {
if (mode(f) == PROXY)
- return toPrimary(secondaryFs.getFileStatus(toSecondary(f)));
+ return toPrimary(secondaryFileSystem().getFileStatus(toSecondary(f)));
else {
IgfsFile info = rmtClient.info(convert(f));
@@ -822,7 +834,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
try {
if (modeRslvr.resolveMode(igfsPath) == PROXY)
- return secondaryFs.getFileBlockLocations(path, start, len);
+ return secondaryFileSystem().getFileBlockLocations(path, start, len);
else {
long now = System.currentTimeMillis();
@@ -873,7 +885,7 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
* @return Secondary file system path.
*/
private Path toSecondary(Path path) {
- assert secondaryFs != null;
+ assert factory != null;
assert secondaryUri != null;
return convertPath(path, secondaryUri);
@@ -1045,4 +1057,15 @@ public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closea
public String user() {
return user;
}
+
+ /**
+ * Gets cached or creates a {@link FileSystem}.
+ *
+ * @return The secondary file system.
+ */
+ private FileSystem secondaryFileSystem() throws IOException{
+ assert factory != null;
+
+ return factory.get(user);
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/SecondaryFileSystemProvider.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/SecondaryFileSystemProvider.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/SecondaryFileSystemProvider.java
deleted file mode 100644
index d5be074..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/SecondaryFileSystemProvider.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.AbstractFileSystem;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.IgniteUtils;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Encapsulates logic of secondary filesystem creation.
- */
-public class SecondaryFileSystemProvider {
- /** Configuration of the secondary filesystem, never null. */
- private final Configuration cfg = HadoopUtils.safeCreateConfiguration();
-
- /** The secondary filesystem URI, never null. */
- private final URI uri;
-
- /**
- * Creates new provider with given config parameters. The configuration URL is optional. The filesystem URI must be
- * specified either explicitly or in the configuration provided.
- *
- * @param secUri the secondary Fs URI (optional). If not given explicitly, it must be specified as "fs.defaultFS"
- * property in the provided configuration.
- * @param secConfPath the secondary Fs path (file path on the local file system, optional).
- * See {@link IgniteUtils#resolveIgniteUrl(String)} on how the path resolved.
- * @throws IOException
- */
- public SecondaryFileSystemProvider(final @Nullable String secUri,
- final @Nullable String secConfPath) throws IOException {
- if (secConfPath != null) {
- URL url = U.resolveIgniteUrl(secConfPath);
-
- if (url == null) {
- // If secConfPath is given, it should be resolvable:
- throw new IllegalArgumentException("Failed to resolve secondary file system configuration path " +
- "(ensure that it exists locally and you have read access to it): " + secConfPath);
- }
-
- cfg.addResource(url);
- }
-
- // if secondary fs URI is not given explicitly, try to get it from the configuration:
- if (secUri == null)
- uri = FileSystem.getDefaultUri(cfg);
- else {
- try {
- uri = new URI(secUri);
- }
- catch (URISyntaxException use) {
- throw new IOException("Failed to resolve secondary file system URI: " + secUri);
- }
- }
-
- // Disable caching:
- String prop = HadoopFileSystemsUtils.disableFsCachePropertyName(uri.getScheme());
-
- cfg.setBoolean(prop, true);
- }
-
- /**
- * @return {@link org.apache.hadoop.fs.FileSystem} instance for this secondary Fs.
- * @throws IOException
- */
- public FileSystem createFileSystem(String userName) throws IOException {
- userName = IgfsUtils.fixUserName(userName);
-
- final FileSystem fileSys;
-
- try {
- fileSys = FileSystem.get(uri, cfg, userName);
- }
- catch (InterruptedException e) {
- Thread.currentThread().interrupt();
-
- throw new IOException("Failed to create file system due to interrupt.", e);
- }
-
- return fileSys;
- }
-
- /**
- * @return {@link org.apache.hadoop.fs.AbstractFileSystem} instance for this secondary Fs.
- * @throws IOException in case of error.
- */
- public AbstractFileSystem createAbstractFileSystem(String userName) throws IOException {
- userName = IgfsUtils.fixUserName(userName);
-
- String ticketCachePath = cfg.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
-
- UserGroupInformation ugi = UserGroupInformation.getBestUGI(ticketCachePath, userName);
-
- try {
- return ugi.doAs(new PrivilegedExceptionAction<AbstractFileSystem>() {
- @Override public AbstractFileSystem run() throws IOException {
- return AbstractFileSystem.get(uri, cfg);
- }
- });
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
-
- throw new IOException("Failed to create file system due to interrupt.", ie);
- }
- }
-
- /**
- * @return the secondary fs URI, never null.
- */
- public URI uri() {
- return uri;
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
index 48ade79..1ecbee5 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
@@ -39,7 +39,7 @@ public class HadoopFileSystemCacheUtils {
public static HadoopLazyConcurrentMap<FsCacheKey, FileSystem> createHadoopLazyConcurrentMap() {
return new HadoopLazyConcurrentMap<>(
new HadoopLazyConcurrentMap.ValueFactory<FsCacheKey, FileSystem>() {
- @Override public FileSystem createValue(FsCacheKey key) {
+ @Override public FileSystem createValue(FsCacheKey key) throws IOException {
try {
assert key != null;
@@ -57,8 +57,10 @@ public class HadoopFileSystemCacheUtils {
return FileSystem.get(uri, cfg, key.user());
}
- catch (IOException | InterruptedException ioe) {
- throw new IgniteException(ioe);
+ catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+
+ throw new IOException("Failed to create file system due to interrupt.", e);
}
}
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
index 89eaf73..681cddb 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
@@ -18,6 +18,7 @@
package org.apache.ignite.internal.processors.hadoop.fs;
import java.io.Closeable;
+import java.io.IOException;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
@@ -204,8 +205,8 @@ public class HadoopLazyConcurrentMap<K, V extends Closeable> {
*
* @param key the key to create value for
* @return the value.
- * @throws IgniteException on failure.
+ * @throws IOException On failure.
*/
- public V createValue(K key);
+ public V createValue(K key) throws IOException;
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
index ea65464..10b1bcd 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
@@ -19,7 +19,7 @@ package org.apache.ignite.igfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
import org.apache.ignite.internal.processors.igfs.IgfsDualAbstractSelfTest;
@@ -74,12 +74,16 @@ public abstract class Hadoop1DualAbstractTest extends IgfsDualAbstractSelfTest {
prepareConfiguration();
- IgniteHadoopIgfsSecondaryFileSystem second =
- new IgniteHadoopIgfsSecondaryFileSystem(secondaryUri, secondaryConfFullPath);
+ CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();
- FileSystem fileSystem = second.fileSystem();
+ factory.setUri(secondaryUri);
+ factory.setConfigPaths(secondaryConfFullPath);
- igfsSecondary = new HadoopFileSystemUniversalFileSystemAdapter(fileSystem);
+ IgniteHadoopIgfsSecondaryFileSystem second = new IgniteHadoopIgfsSecondaryFileSystem();
+
+ second.setFileSystemFactory(factory);
+
+ igfsSecondary = new HadoopFileSystemUniversalFileSystemAdapter(factory);
return second;
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
new file mode 100644
index 0000000..1d02f0f
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.Nullable;
+import java.io.Externalizable;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.net.URI;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+
+/**
+ * Tests for Hadoop file system factory.
+ */
+public class HadoopFIleSystemFactorySelfTest extends IgfsCommonAbstractTest {
+ /** Amount of "start" invocations */
+ private static final AtomicInteger START_CNT = new AtomicInteger();
+
+ /** Amount of "stop" invocations */
+ private static final AtomicInteger STOP_CNT = new AtomicInteger();
+
+ /** Path to secondary file system configuration. */
+ private static final String SECONDARY_CFG_PATH = "/work/core-site-HadoopFIleSystemFactorySelfTest.xml";
+
+ /** IGFS path for DUAL mode. */
+ private static final Path PATH_DUAL = new Path("/ignite/sync/test_dir");
+
+ /** IGFS path for PROXY mode. */
+ private static final Path PATH_PROXY = new Path("/ignite/proxy/test_dir");
+
+ /** IGFS path for DUAL mode. */
+ private static final IgfsPath IGFS_PATH_DUAL = new IgfsPath("/ignite/sync/test_dir");
+
+ /** IGFS path for PROXY mode. */
+ private static final IgfsPath IGFS_PATH_PROXY = new IgfsPath("/ignite/proxy/test_dir");
+
+ /** Secondary IGFS. */
+ private IgfsEx secondary;
+
+ /** Primary IGFS. */
+ private IgfsEx primary;
+
+ /** {@inheritDoc} */
+ @Override protected void beforeTest() throws Exception {
+ super.beforeTest();
+
+ START_CNT.set(0);
+ STOP_CNT.set(0);
+
+ secondary = startSecondary();
+ primary = startPrimary();
+ }
+
+ /** {@inheritDoc} */
+ @Override protected void afterTest() throws Exception {
+ super.afterTest();
+
+ secondary = null;
+ primary = null;
+
+ stopAllGrids();
+ }
+
+ /**
+ * Test custom factory.
+ *
+ * @throws Exception If failed.
+ */
+ @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+ public void testCustomFactory() throws Exception {
+ assert START_CNT.get() == 1;
+ assert STOP_CNT.get() == 0;
+
+ // Use IGFS directly.
+ primary.mkdirs(IGFS_PATH_DUAL);
+
+ assert primary.exists(IGFS_PATH_DUAL);
+ assert secondary.exists(IGFS_PATH_DUAL);
+
+ GridTestUtils.assertThrows(null, new Callable<Object>() {
+ @Override public Object call() throws Exception {
+ primary.mkdirs(IGFS_PATH_PROXY);
+
+ return null;
+ }
+ }, IgfsInvalidPathException.class, null);
+
+ // Create remote instance.
+ FileSystem fs = FileSystem.get(URI.create("igfs://primary:primary@127.0.0.1:10500/"), baseConfiguration());
+
+ // Ensure lifecycle callback was invoked.
+ assert START_CNT.get() == 2;
+ assert STOP_CNT.get() == 0;
+
+ // Check file system operations.
+ assert fs.exists(PATH_DUAL);
+
+ assert fs.delete(PATH_DUAL, true);
+ assert !primary.exists(IGFS_PATH_DUAL);
+ assert !secondary.exists(IGFS_PATH_DUAL);
+ assert !fs.exists(PATH_DUAL);
+
+ assert fs.mkdirs(PATH_DUAL);
+ assert primary.exists(IGFS_PATH_DUAL);
+ assert secondary.exists(IGFS_PATH_DUAL);
+ assert fs.exists(PATH_DUAL);
+
+ assert fs.mkdirs(PATH_PROXY);
+ assert secondary.exists(IGFS_PATH_PROXY);
+ assert fs.exists(PATH_PROXY);
+
+ // Close file system and ensure that associated factory was notified.
+ fs.close();
+
+ assert START_CNT.get() == 2;
+ assert STOP_CNT.get() == 1;
+
+ // Stop primary node and ensure that base factory was notified.
+ G.stop(primary.context().kernalContext().grid().name(), true);
+
+ assert START_CNT.get() == 2;
+ assert STOP_CNT.get() == 2;
+ }
+
+ /**
+ * Start secondary IGFS.
+ *
+ * @return IGFS.
+ * @throws Exception If failed.
+ */
+ private static IgfsEx startSecondary() throws Exception {
+ return start("secondary", 11500, IgfsMode.PRIMARY, null);
+ }
+
+ /**
+ * Start primary IGFS.
+ *
+ * @return IGFS.
+ * @throws Exception If failed.
+ */
+ private static IgfsEx startPrimary() throws Exception {
+ // Prepare configuration.
+ Configuration conf = baseConfiguration();
+
+ conf.set("fs.defaultFS", "igfs://secondary:secondary@127.0.0.1:11500/");
+
+ writeConfigurationToFile(conf);
+
+ // Configure factory.
+ TestFactory factory = new TestFactory();
+
+ factory.setUri("igfs://secondary:secondary@127.0.0.1:11500/");
+ factory.setConfigPaths(SECONDARY_CFG_PATH);
+
+ // Configure file system.
+ IgniteHadoopIgfsSecondaryFileSystem fs = new IgniteHadoopIgfsSecondaryFileSystem();
+
+ fs.setFileSystemFactory(factory);
+
+ // Start.
+ return start("primary", 10500, IgfsMode.PRIMARY, fs);
+ }
+
+ /**
+ * Start Ignite node with IGFS instance.
+ *
+ * @param name Node and IGFS name.
+ * @param endpointPort Endpoint port.
+ * @param dfltMode Default path mode.
+ * @param secondaryFs Secondary file system.
+ * @return Igfs instance.
+ */
+ private static IgfsEx start(String name, int endpointPort, IgfsMode dfltMode,
+ @Nullable IgfsSecondaryFileSystem secondaryFs) {
+ IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+ endpointCfg.setType(IgfsIpcEndpointType.TCP);
+ endpointCfg.setHost("127.0.0.1");
+ endpointCfg.setPort(endpointPort);
+
+ FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+ igfsCfg.setDataCacheName("dataCache");
+ igfsCfg.setMetaCacheName("metaCache");
+ igfsCfg.setName(name);
+ igfsCfg.setDefaultMode(dfltMode);
+ igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+ igfsCfg.setSecondaryFileSystem(secondaryFs);
+
+ CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
+
+ dataCacheCfg.setName("dataCache");
+ dataCacheCfg.setCacheMode(PARTITIONED);
+ dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+ dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
+ dataCacheCfg.setBackups(0);
+ dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
+ dataCacheCfg.setOffHeapMaxMemory(0);
+
+ CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+ metaCacheCfg.setName("metaCache");
+ metaCacheCfg.setCacheMode(REPLICATED);
+ metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+ metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+ IgniteConfiguration cfg = new IgniteConfiguration();
+
+ cfg.setGridName(name);
+
+ TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+ discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+ cfg.setDiscoverySpi(discoSpi);
+ cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
+ cfg.setFileSystemConfiguration(igfsCfg);
+
+ cfg.setLocalHost("127.0.0.1");
+ cfg.setConnectorConfiguration(null);
+
+ return (IgfsEx)G.start(cfg).fileSystem(name);
+ }
+
+ /**
+ * Create base FileSystem configuration.
+ *
+ * @return Configuration.
+ */
+ private static Configuration baseConfiguration() {
+ Configuration conf = new Configuration();
+
+ conf.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
+
+ return conf;
+ }
+
+ /**
+ * Write configuration to file.
+ *
+ * @param conf Configuration.
+ * @throws Exception If failed.
+ */
+ @SuppressWarnings("ResultOfMethodCallIgnored")
+ private static void writeConfigurationToFile(Configuration conf) throws Exception {
+ final String path = U.getIgniteHome() + SECONDARY_CFG_PATH;
+
+ File file = new File(path);
+
+ file.delete();
+
+ assertFalse(file.exists());
+
+ try (FileOutputStream fos = new FileOutputStream(file)) {
+ conf.writeXml(fos);
+ }
+
+ assertTrue(file.exists());
+ }
+
+ /**
+ * Test factory.
+ */
+ private static class TestFactory extends CachingHadoopFileSystemFactory {
+ /**
+ * {@link Externalizable} support.
+ */
+ public TestFactory() {
+ // No-op.
+ }
+
+ /** {@inheritDoc} */
+ @Override public void start() throws IgniteException {
+ START_CNT.incrementAndGet();
+
+ super.start();
+ }
+
+ /** {@inheritDoc} */
+ @Override public void stop() throws IgniteException {
+ STOP_CNT.incrementAndGet();
+
+ super.stop();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFileSystemUniversalFileSystemAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFileSystemUniversalFileSystemAdapter.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFileSystemUniversalFileSystemAdapter.java
index 608bd25..5b6fd81 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFileSystemUniversalFileSystemAdapter.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFileSystemUniversalFileSystemAdapter.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
import org.apache.ignite.internal.processors.igfs.IgfsEx;
import org.apache.ignite.internal.processors.igfs.UniversalFileSystemAdapter;
@@ -34,55 +36,55 @@ import org.apache.ignite.internal.processors.igfs.UniversalFileSystemAdapter;
* Universal adapter wrapping {@link org.apache.hadoop.fs.FileSystem} instance.
*/
public class HadoopFileSystemUniversalFileSystemAdapter implements UniversalFileSystemAdapter {
- /** The wrapped filesystem. */
- private final FileSystem fileSys;
+ /** File system factory. */
+ private final HadoopFileSystemFactory factory;
/**
* Constructor.
- * @param fs the filesystem to be wrapped.
+ * @param factory File system factory.
*/
- public HadoopFileSystemUniversalFileSystemAdapter(FileSystem fs) {
- this.fileSys = fs;
+ public HadoopFileSystemUniversalFileSystemAdapter(HadoopFileSystemFactory factory) {
+ assert factory != null;
+
+ this.factory = factory;
}
/** {@inheritDoc} */
- @Override public String name() {
- return fileSys.getUri().toString();
+ @Override public String name() throws IOException {
+ return get().getUri().toString();
}
/** {@inheritDoc} */
@Override public boolean exists(String path) throws IOException {
- return fileSys.exists(new Path(path));
+ return get().exists(new Path(path));
}
/** {@inheritDoc} */
@Override public boolean delete(String path, boolean recursive) throws IOException {
- boolean ok = fileSys.delete(new Path(path), recursive);
- return ok;
+ return get().delete(new Path(path), recursive);
}
/** {@inheritDoc} */
@Override public void mkdirs(String path) throws IOException {
- boolean ok = fileSys.mkdirs(new Path(path));
+ boolean ok = get().mkdirs(new Path(path));
if (!ok)
throw new IOException("Failed to mkdirs: " + path);
}
/** {@inheritDoc} */
@Override public void format() throws IOException {
- HadoopIgfsUtils.clear(fileSys);
+ HadoopIgfsUtils.clear(get());
}
/** {@inheritDoc} */
@Override public Map<String, String> properties(String path) throws IOException {
Path p = new Path(path);
- FileStatus status = fileSys.getFileStatus(p);
+ FileStatus status = get().getFileStatus(p);
Map<String,String> m = new HashMap<>(3); // max size == 4
m.put(IgfsEx.PROP_USER_NAME, status.getOwner());
-
m.put(IgfsEx.PROP_GROUP_NAME, status.getGroup());
FsPermission perm = status.getPermission();
@@ -95,7 +97,7 @@ public class HadoopFileSystemUniversalFileSystemAdapter implements UniversalFile
/** {@inheritDoc} */
@Override public InputStream openInputStream(String path) throws IOException {
- return fileSys.open(new Path(path));
+ return get().open(new Path(path));
}
/** {@inheritDoc} */
@@ -103,16 +105,27 @@ public class HadoopFileSystemUniversalFileSystemAdapter implements UniversalFile
Path p = new Path(path);
if (append)
- return fileSys.append(p);
+ return get().append(p);
else
- return fileSys.create(p, true/*overwrite*/);
+ return get().create(p, true/*overwrite*/);
}
/** {@inheritDoc} */
- @Override public <T> T getAdapter(Class<T> clazz) {
- if (clazz == FileSystem.class)
- return (T)fileSys;
+ @SuppressWarnings("unchecked")
+ @Override public <T> T unwrap(Class<T> cls) {
+ if (HadoopFileSystemFactory.class.isAssignableFrom(cls))
+ return (T)factory;
return null;
}
+
+ /**
+ * Create file system.
+ *
+ * @return File system.
+ * @throws IOException If failed.
+ */
+ private FileSystem get() throws IOException {
+ return factory.get(FileSystemConfiguration.DFLT_USER_NAME);
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
index 4ddfb0d..d9b5d66 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
@@ -17,12 +17,6 @@
package org.apache.ignite.igfs;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.util.concurrent.Callable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -34,9 +28,9 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.processors.hadoop.SecondaryFileSystemProvider;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
import org.apache.ignite.internal.util.typedef.G;
@@ -48,6 +42,13 @@ import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
import org.apache.ignite.testframework.GridTestUtils;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.util.concurrent.Callable;
+
import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
import static org.apache.ignite.cache.CacheMode.PARTITIONED;
import static org.apache.ignite.cache.CacheMode.REPLICATED;
@@ -173,12 +174,16 @@ public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra
else
primaryConfFullPath = null;
- SecondaryFileSystemProvider provider =
- new SecondaryFileSystemProvider(primaryFsUriStr, primaryConfFullPath);
+ CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
+
+ fac.setConfigPaths(primaryConfFullPath);
+ fac.setUri(primaryFsUriStr);
+
+ fac.start();
- primaryFs = provider.createFileSystem(null);
+ primaryFs = fac.get(null); //provider.createFileSystem(null);
- primaryFsUri = provider.uri();
+ primaryFsUri = primaryFs.getUri();
}
/**
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
index d368955..6617127 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
@@ -17,29 +17,6 @@
package org.apache.ignite.igfs;
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.lang.reflect.Field;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayDeque;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Deque;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
@@ -59,6 +36,7 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEx;
@@ -70,6 +48,7 @@ import org.apache.ignite.internal.util.GridConcurrentHashSet;
import org.apache.ignite.internal.util.lang.GridAbsPredicate;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.X;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteBiTuple;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
@@ -79,6 +58,30 @@ import org.apache.ignite.testframework.GridTestUtils;
import org.jetbrains.annotations.Nullable;
import org.jsr166.ThreadLocalRandom8;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.reflect.Field;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
import static org.apache.ignite.cache.CacheMode.PARTITIONED;
import static org.apache.ignite.cache.CacheMode.REPLICATED;
@@ -380,9 +383,20 @@ public abstract class IgniteHadoopFileSystemAbstractSelfTest extends IgfsCommonA
cfg.setPrefetchBlocks(1);
cfg.setDefaultMode(mode);
- if (mode != PRIMARY)
- cfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
- SECONDARY_URI, SECONDARY_CFG_PATH, SECONDARY_FS_USER));
+ if (mode != PRIMARY) {
+ CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
+
+ fac.setUri(SECONDARY_URI);
+ fac.setConfigPaths(SECONDARY_CFG_PATH);
+
+ IgniteHadoopIgfsSecondaryFileSystem sec = new IgniteHadoopIgfsSecondaryFileSystem();
+
+ sec.setFileSystemFactory(fac);
+ sec.setDefaultUserName(SECONDARY_FS_USER);
+
+ // NB: start() will be invoked upon IgfsImpl init.
+ cfg.setSecondaryFileSystem(sec);
+ }
cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName));
@@ -398,7 +412,8 @@ public abstract class IgniteHadoopFileSystemAbstractSelfTest extends IgfsCommonA
@Override public Object call() throws Exception {
return new IgniteHadoopFileSystem().getUri();
}
- }, IllegalStateException.class, "URI is null (was IgniteHadoopFileSystem properly initialized?).");
+ }, IllegalStateException.class,
+ "URI is null (was IgniteHadoopFileSystem properly initialized?)");
}
/** @throws Exception If failed. */
@@ -506,7 +521,7 @@ public abstract class IgniteHadoopFileSystemAbstractSelfTest extends IgfsCommonA
// Ensure that IO is stopped when nobody else is need it.
fs.close();
- assertEquals(initSize - 1, cache.size());
+ assert initSize >= cache.size();
assert (Boolean)stopField.get(io);
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/8ed73b4a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
index 6c542b5..9092f32 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
@@ -37,6 +37,7 @@ import org.apache.ignite.client.hadoop.HadoopClientProtocolEmbeddedSelfTest;
import org.apache.ignite.client.hadoop.HadoopClientProtocolSelfTest;
import org.apache.ignite.igfs.Hadoop1OverIgfsDualAsyncTest;
import org.apache.ignite.igfs.Hadoop1OverIgfsDualSyncTest;
+import org.apache.ignite.igfs.HadoopFIleSystemFactorySelfTest;
import org.apache.ignite.igfs.HadoopIgfs20FileSystemLoopbackPrimarySelfTest;
import org.apache.ignite.igfs.HadoopIgfsDualAsyncSelfTest;
import org.apache.ignite.igfs.HadoopIgfsDualSyncSelfTest;
@@ -113,6 +114,8 @@ public class IgniteHadoopTestSuite extends TestSuite {
suite.addTest(new TestSuite(ldr.loadClass(Hadoop1OverIgfsDualSyncTest.class.getName())));
suite.addTest(new TestSuite(ldr.loadClass(Hadoop1OverIgfsDualAsyncTest.class.getName())));
+ suite.addTest(new TestSuite(ldr.loadClass(HadoopFIleSystemFactorySelfTest.class.getName())));
+
suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.class.getName())));
suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.class.getName())));
suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.class.getName())));