You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ignite.apache.org by vo...@apache.org on 2016/09/16 11:21:34 UTC
[51/51] [partial] ignite git commit: IGNITE-3916: Initial impl.
IGNITE-3916: Initial impl.
Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/b7489457
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/b7489457
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/b7489457
Branch: refs/heads/ignite-3916
Commit: b74894579b05fae59e5ec9c9de93be6c6de830c2
Parents: 2fe0272
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Fri Sep 16 14:20:11 2016 +0300
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Fri Sep 16 14:20:24 2016 +0300
----------------------------------------------------------------------
assembly/dependencies-fabric-lgpl.xml | 1 +
assembly/dependencies-fabric.xml | 1 +
assembly/dependencies-hadoop.xml | 1 +
assembly/libs/README.txt | 3 +-
.../ignite/internal/IgniteComponentType.java | 2 +-
.../processors/hadoop/HadoopNoopProcessor.java | 5 +-
modules/hadoop-impl/README.txt | 33 +
modules/hadoop-impl/config/core-site.ignite.xml | 90 +
modules/hadoop-impl/config/hive-site.ignite.xml | 37 +
.../hadoop-impl/config/mapred-site.ignite.xml | 66 +
modules/hadoop-impl/licenses/apache-2.0.txt | 202 +
modules/hadoop-impl/pom.xml | 158 +
.../hadoop/fs/BasicHadoopFileSystemFactory.java | 275 +
.../fs/CachingHadoopFileSystemFactory.java | 85 +
.../hadoop/fs/HadoopFileSystemFactory.java | 52 +
.../fs/IgniteHadoopFileSystemCounterWriter.java | 103 +
.../fs/IgniteHadoopIgfsSecondaryFileSystem.java | 580 +
.../fs/KerberosHadoopFileSystemFactory.java | 217 +
.../apache/ignite/hadoop/fs/package-info.java | 22 +
.../hadoop/fs/v1/IgniteHadoopFileSystem.java | 1364 ++
.../ignite/hadoop/fs/v1/package-info.java | 22 +
.../hadoop/fs/v2/IgniteHadoopFileSystem.java | 1076 ++
.../ignite/hadoop/fs/v2/package-info.java | 22 +
.../IgniteHadoopClientProtocolProvider.java | 144 +
.../mapreduce/IgniteHadoopMapReducePlanner.java | 416 +
.../IgniteHadoopWeightedMapReducePlanner.java | 846 ++
.../ignite/hadoop/mapreduce/package-info.java | 22 +
.../org/apache/ignite/hadoop/package-info.java | 22 +
.../ignite/hadoop/util/BasicUserNameMapper.java | 112 +
.../hadoop/util/ChainedUserNameMapper.java | 94 +
.../hadoop/util/KerberosUserNameMapper.java | 137 +
.../ignite/hadoop/util/UserNameMapper.java | 37 +
.../apache/ignite/hadoop/util/package-info.java | 22 +
.../processors/hadoop/HadoopAttributes.java | 168 +
.../processors/hadoop/HadoopClassLoader.java | 363 +
.../hadoop/HadoopClassLoaderUtils.java | 684 +
.../processors/hadoop/HadoopComponent.java | 62 +
.../processors/hadoop/HadoopContext.java | 201 +
.../processors/hadoop/HadoopDefaultJobInfo.java | 156 +
.../internal/processors/hadoop/HadoopImpl.java | 134 +
.../hadoop/HadoopMapReduceCounterGroup.java | 123 +
.../hadoop/HadoopMapReduceCounters.java | 228 +
.../processors/hadoop/HadoopProcessor.java | 223 +
.../internal/processors/hadoop/HadoopSetup.java | 542 +
.../hadoop/HadoopTaskCancelledException.java | 35 +
.../internal/processors/hadoop/HadoopUtils.java | 443 +
.../hadoop/counter/HadoopCounterAdapter.java | 129 +
.../hadoop/counter/HadoopCountersImpl.java | 200 +
.../hadoop/counter/HadoopLongCounter.java | 93 +
.../counter/HadoopPerformanceCounter.java | 288 +
.../hadoop/fs/HadoopFileSystemCacheUtils.java | 242 +
.../hadoop/fs/HadoopFileSystemsUtils.java | 51 +
.../hadoop/fs/HadoopLazyConcurrentMap.java | 212 +
.../hadoop/fs/HadoopLocalFileSystemV1.java | 39 +
.../hadoop/fs/HadoopLocalFileSystemV2.java | 88 +
.../processors/hadoop/fs/HadoopParameters.java | 94 +
.../hadoop/fs/HadoopRawLocalFileSystem.java | 314 +
.../processors/hadoop/igfs/HadoopIgfs.java | 202 +
.../igfs/HadoopIgfsCommunicationException.java | 57 +
.../hadoop/igfs/HadoopIgfsEndpoint.java | 210 +
.../processors/hadoop/igfs/HadoopIgfsEx.java | 93 +
.../hadoop/igfs/HadoopIgfsFuture.java | 97 +
.../hadoop/igfs/HadoopIgfsInProc.java | 510 +
.../hadoop/igfs/HadoopIgfsInputStream.java | 629 +
.../processors/hadoop/igfs/HadoopIgfsIo.java | 76 +
.../processors/hadoop/igfs/HadoopIgfsIpcIo.java | 624 +
.../hadoop/igfs/HadoopIgfsIpcIoListener.java | 36 +
.../hadoop/igfs/HadoopIgfsJclLogger.java | 116 +
.../hadoop/igfs/HadoopIgfsOutProc.java | 524 +
.../hadoop/igfs/HadoopIgfsOutputStream.java | 201 +
.../hadoop/igfs/HadoopIgfsProperties.java | 86 +
.../hadoop/igfs/HadoopIgfsProxyInputStream.java | 337 +
.../igfs/HadoopIgfsProxyOutputStream.java | 165 +
...fsSecondaryFileSystemPositionedReadable.java | 105 +
.../hadoop/igfs/HadoopIgfsStreamDelegate.java | 96 +
.../igfs/HadoopIgfsStreamEventListener.java | 39 +
.../processors/hadoop/igfs/HadoopIgfsUtils.java | 174 +
.../hadoop/igfs/HadoopIgfsWrapper.java | 552 +
.../hadoop/jobtracker/HadoopJobMetadata.java | 316 +
.../hadoop/jobtracker/HadoopJobTracker.java | 1706 +++
.../hadoop/message/HadoopMessage.java | 27 +
.../planner/HadoopAbstractMapReducePlanner.java | 116 +
.../planner/HadoopDefaultMapReducePlan.java | 109 +
.../planner/HadoopMapReducePlanGroup.java | 150 +
.../planner/HadoopMapReducePlanTopology.java | 89 +
.../hadoop/proto/HadoopClientProtocol.java | 349 +
.../proto/HadoopProtocolJobCountersTask.java | 46 +
.../proto/HadoopProtocolJobStatusTask.java | 82 +
.../hadoop/proto/HadoopProtocolKillJobTask.java | 46 +
.../proto/HadoopProtocolNextTaskIdTask.java | 36 +
.../proto/HadoopProtocolSubmitJobTask.java | 59 +
.../hadoop/proto/HadoopProtocolTaskAdapter.java | 120 +
.../proto/HadoopProtocolTaskArguments.java | 84 +
.../hadoop/shuffle/HadoopShuffle.java | 263 +
.../hadoop/shuffle/HadoopShuffleAck.java | 92 +
.../hadoop/shuffle/HadoopShuffleJob.java | 612 +
.../hadoop/shuffle/HadoopShuffleMessage.java | 242 +
.../HadoopConcurrentHashMultimap.java | 616 +
.../shuffle/collections/HadoopHashMultimap.java | 176 +
.../collections/HadoopHashMultimapBase.java | 211 +
.../shuffle/collections/HadoopMultimap.java | 113 +
.../shuffle/collections/HadoopMultimapBase.java | 435 +
.../shuffle/collections/HadoopSkipList.java | 733 +
.../shuffle/streams/HadoopDataInStream.java | 171 +
.../shuffle/streams/HadoopDataOutStream.java | 130 +
.../shuffle/streams/HadoopOffheapBuffer.java | 122 +
.../HadoopEmbeddedTaskExecutor.java | 153 +
.../taskexecutor/HadoopExecutorService.java | 234 +
.../hadoop/taskexecutor/HadoopRunnableTask.java | 293 +
.../taskexecutor/HadoopTaskExecutorAdapter.java | 59 +
.../hadoop/taskexecutor/HadoopTaskState.java | 38 +
.../hadoop/taskexecutor/HadoopTaskStatus.java | 116 +
.../external/HadoopExternalTaskExecutor.java | 976 ++
.../external/HadoopExternalTaskMetadata.java | 67 +
.../external/HadoopJobInfoUpdateRequest.java | 113 +
.../external/HadoopPrepareForJobRequest.java | 130 +
.../external/HadoopProcessDescriptor.java | 149 +
.../external/HadoopProcessStartedAck.java | 47 +
.../external/HadoopTaskExecutionRequest.java | 114 +
.../external/HadoopTaskFinishedMessage.java | 94 +
.../child/HadoopChildProcessRunner.java | 459 +
.../child/HadoopExternalProcessStarter.java | 301 +
.../HadoopAbstractCommunicationClient.java | 96 +
.../HadoopCommunicationClient.java | 72 +
.../HadoopExternalCommunication.java | 1460 ++
.../HadoopHandshakeTimeoutException.java | 42 +
.../communication/HadoopIpcToNioAdapter.java | 248 +
.../communication/HadoopMarshallerFilter.java | 86 +
.../communication/HadoopMessageListener.java | 39 +
.../HadoopTcpNioCommunicationClient.java | 93 +
.../hadoop/v1/HadoopV1CleanupTask.java | 64 +
.../processors/hadoop/v1/HadoopV1Counter.java | 106 +
.../processors/hadoop/v1/HadoopV1MapTask.java | 122 +
.../hadoop/v1/HadoopV1OutputCollector.java | 137 +
.../hadoop/v1/HadoopV1Partitioner.java | 44 +
.../hadoop/v1/HadoopV1ReduceTask.java | 101 +
.../processors/hadoop/v1/HadoopV1Reporter.java | 81 +
.../processors/hadoop/v1/HadoopV1SetupTask.java | 56 +
.../processors/hadoop/v1/HadoopV1Splitter.java | 102 +
.../processors/hadoop/v1/HadoopV1Task.java | 97 +
.../processors/hadoop/v2/HadoopDaemon.java | 126 +
.../hadoop/v2/HadoopExternalSplit.java | 89 +
.../hadoop/v2/HadoopSerializationWrapper.java | 138 +
.../hadoop/v2/HadoopShutdownHookManager.java | 98 +
.../hadoop/v2/HadoopSplitWrapper.java | 119 +
.../hadoop/v2/HadoopV2CleanupTask.java | 72 +
.../processors/hadoop/v2/HadoopV2Context.java | 243 +
.../processors/hadoop/v2/HadoopV2Counter.java | 88 +
.../processors/hadoop/v2/HadoopV2Job.java | 445 +
.../hadoop/v2/HadoopV2JobResourceManager.java | 323 +
.../processors/hadoop/v2/HadoopV2MapTask.java | 99 +
.../hadoop/v2/HadoopV2Partitioner.java | 44 +
.../hadoop/v2/HadoopV2ReduceTask.java | 91 +
.../processors/hadoop/v2/HadoopV2SetupTask.java | 65 +
.../processors/hadoop/v2/HadoopV2Splitter.java | 111 +
.../processors/hadoop/v2/HadoopV2Task.java | 185 +
.../hadoop/v2/HadoopV2TaskContext.java | 560 +
.../hadoop/v2/HadoopWritableSerialization.java | 75 +
...op.mapreduce.protocol.ClientProtocolProvider | 1 +
.../HadoopClientProtocolEmbeddedSelfTest.java | 35 +
.../hadoop/HadoopClientProtocolSelfTest.java | 654 +
.../hadoop/cache/HadoopTxConfigCacheTest.java | 42 +
...KerberosHadoopFileSystemFactorySelfTest.java | 121 +
.../util/BasicUserNameMapperSelfTest.java | 133 +
.../util/ChainedUserNameMapperSelfTest.java | 107 +
.../util/KerberosUserNameMapperSelfTest.java | 99 +
.../ignite/igfs/Hadoop1DualAbstractTest.java | 158 +
.../igfs/Hadoop1OverIgfsDualAsyncTest.java | 30 +
.../igfs/Hadoop1OverIgfsDualSyncTest.java | 30 +
.../igfs/HadoopFIleSystemFactorySelfTest.java | 317 +
.../HadoopIgfs20FileSystemAbstractSelfTest.java | 2040 +++
...Igfs20FileSystemLoopbackPrimarySelfTest.java | 74 +
...oopIgfs20FileSystemShmemPrimarySelfTest.java | 74 +
.../igfs/HadoopIgfsDualAbstractSelfTest.java | 321 +
.../igfs/HadoopIgfsDualAsyncSelfTest.java | 32 +
.../ignite/igfs/HadoopIgfsDualSyncSelfTest.java | 32 +
...adoopIgfsSecondaryFileSystemTestAdapter.java | 149 +
...oopSecondaryFileSystemConfigurationTest.java | 575 +
.../apache/ignite/igfs/IgfsEventsTestSuite.java | 285 +
.../igfs/IgfsNearOnlyMultiNodeSelfTest.java | 223 +
.../IgniteHadoopFileSystemAbstractSelfTest.java | 2432 +++
.../IgniteHadoopFileSystemClientSelfTest.java | 212 +
...IgniteHadoopFileSystemHandshakeSelfTest.java | 389 +
.../IgniteHadoopFileSystemIpcCacheSelfTest.java | 214 +
.../IgniteHadoopFileSystemLoggerSelfTest.java | 298 +
...niteHadoopFileSystemLoggerStateSelfTest.java | 329 +
...adoopFileSystemLoopbackAbstractSelfTest.java | 46 +
...SystemLoopbackEmbeddedDualAsyncSelfTest.java | 33 +
...eSystemLoopbackEmbeddedDualSyncSelfTest.java | 33 +
...leSystemLoopbackEmbeddedPrimarySelfTest.java | 33 +
...SystemLoopbackEmbeddedSecondarySelfTest.java | 34 +
...SystemLoopbackExternalDualAsyncSelfTest.java | 33 +
...eSystemLoopbackExternalDualSyncSelfTest.java | 33 +
...leSystemLoopbackExternalPrimarySelfTest.java | 33 +
...SystemLoopbackExternalSecondarySelfTest.java | 34 +
...condaryFileSystemInitializationSelfTest.java | 214 +
...teHadoopFileSystemShmemAbstractSelfTest.java | 91 +
...ileSystemShmemEmbeddedDualAsyncSelfTest.java | 33 +
...FileSystemShmemEmbeddedDualSyncSelfTest.java | 33 +
...pFileSystemShmemEmbeddedPrimarySelfTest.java | 33 +
...ileSystemShmemEmbeddedSecondarySelfTest.java | 33 +
...ileSystemShmemExternalDualAsyncSelfTest.java | 33 +
...FileSystemShmemExternalDualSyncSelfTest.java | 33 +
...pFileSystemShmemExternalPrimarySelfTest.java | 33 +
...ileSystemShmemExternalSecondarySelfTest.java | 33 +
.../hadoop/HadoopAbstractMapReduceTest.java | 429 +
.../hadoop/HadoopAbstractSelfTest.java | 239 +
.../hadoop/HadoopAbstractWordCountTest.java | 175 +
.../hadoop/HadoopClassLoaderTest.java | 110 +
.../hadoop/HadoopCommandLineTest.java | 474 +
.../HadoopDefaultMapReducePlannerSelfTest.java | 615 +
.../processors/hadoop/HadoopErrorSimulator.java | 326 +
.../hadoop/HadoopFileSystemsTest.java | 155 +
.../processors/hadoop/HadoopGroupingTest.java | 307 +
.../hadoop/HadoopJobTrackerSelfTest.java | 345 +
.../hadoop/HadoopMapReduceEmbeddedSelfTest.java | 253 +
.../HadoopMapReduceErrorResilienceTest.java | 154 +
.../processors/hadoop/HadoopMapReduceTest.java | 66 +
.../hadoop/HadoopNoHadoopMapReduceTest.java | 47 +
.../processors/hadoop/HadoopPlannerMockJob.java | 168 +
.../hadoop/HadoopPopularWordsTest.java | 298 +
.../HadoopSerializationWrapperSelfTest.java | 79 +
.../processors/hadoop/HadoopSharedMap.java | 66 +
.../hadoop/HadoopSnappyFullMapReduceTest.java | 36 +
.../processors/hadoop/HadoopSnappyTest.java | 102 +
.../hadoop/HadoopSortingExternalTest.java | 46 +
.../processors/hadoop/HadoopSortingTest.java | 303 +
.../hadoop/HadoopSplitWrapperSelfTest.java | 72 +
.../processors/hadoop/HadoopStartup.java | 54 +
.../hadoop/HadoopTaskExecutionSelfTest.java | 567 +
.../hadoop/HadoopTasksAllVersionsTest.java | 260 +
.../processors/hadoop/HadoopTasksV1Test.java | 58 +
.../processors/hadoop/HadoopTasksV2Test.java | 77 +
.../hadoop/HadoopTestRoundRobinMrPlanner.java | 71 +
.../hadoop/HadoopTestTaskContext.java | 228 +
.../processors/hadoop/HadoopTestUtils.java | 178 +
.../hadoop/HadoopUserLibsSelfTest.java | 260 +
.../processors/hadoop/HadoopV2JobSelfTest.java | 100 +
.../hadoop/HadoopValidationSelfTest.java | 53 +
.../HadoopWeightedMapReducePlannerTest.java | 599 +
.../HadoopWeightedPlannerMapReduceTest.java | 38 +
.../hadoop/books/alice-in-wonderland.txt | 3735 +++++
.../processors/hadoop/books/art-of-war.txt | 6982 +++++++++
.../hadoop/books/huckleberry-finn.txt | 11733 +++++++++++++++
.../processors/hadoop/books/sherlock-holmes.txt | 13052 +++++++++++++++++
.../processors/hadoop/books/tom-sawyer.txt | 8858 +++++++++++
.../hadoop/deps/CircularWIthHadoop.java | 32 +
.../hadoop/deps/CircularWithoutHadoop.java | 27 +
.../processors/hadoop/deps/WithCast.java | 41 +
.../hadoop/deps/WithClassAnnotation.java | 28 +
.../hadoop/deps/WithConstructorInvocation.java | 31 +
.../processors/hadoop/deps/WithExtends.java | 27 +
.../processors/hadoop/deps/WithField.java | 29 +
.../processors/hadoop/deps/WithImplements.java | 36 +
.../hadoop/deps/WithIndirectField.java | 27 +
.../processors/hadoop/deps/WithInitializer.java | 33 +
.../processors/hadoop/deps/WithInnerClass.java | 31 +
.../hadoop/deps/WithLocalVariable.java | 38 +
.../hadoop/deps/WithMethodAnnotation.java | 32 +
.../hadoop/deps/WithMethodArgument.java | 31 +
.../hadoop/deps/WithMethodCheckedException.java | 31 +
.../hadoop/deps/WithMethodInvocation.java | 31 +
.../hadoop/deps/WithMethodReturnType.java | 31 +
.../hadoop/deps/WithMethodRuntimeException.java | 31 +
.../processors/hadoop/deps/WithOuterClass.java | 38 +
.../hadoop/deps/WithParameterAnnotation.java | 31 +
.../processors/hadoop/deps/WithStaticField.java | 29 +
.../hadoop/deps/WithStaticInitializer.java | 34 +
.../processors/hadoop/deps/Without.java | 25 +
.../hadoop/examples/HadoopWordCount1.java | 94 +
.../hadoop/examples/HadoopWordCount1Map.java | 79 +
.../hadoop/examples/HadoopWordCount1Reduce.java | 61 +
.../hadoop/examples/HadoopWordCount2.java | 111 +
.../examples/HadoopWordCount2Combiner.java | 45 +
.../hadoop/examples/HadoopWordCount2Mapper.java | 88 +
.../examples/HadoopWordCount2Reducer.java | 113 +
.../collections/HadoopAbstractMapTest.java | 174 +
.../HadoopConcurrentHashMultimapSelftest.java | 278 +
.../collections/HadoopHashMapSelfTest.java | 131 +
.../collections/HadoopSkipListSelfTest.java | 318 +
.../streams/HadoopDataStreamSelfTest.java | 150 +
.../taskexecutor/HadoopExecutorServiceTest.java | 118 +
.../HadoopExternalTaskExecutionSelfTest.java | 232 +
.../HadoopExternalCommunicationSelfTest.java | 220 +
.../testsuites/IgniteHadoopTestSuite.java | 354 +
.../IgniteIgfsLinuxAndMacOSTestSuite.java | 72 +
modules/hadoop/pom.xml | 36 -
.../hadoop/fs/BasicHadoopFileSystemFactory.java | 275 -
.../fs/CachingHadoopFileSystemFactory.java | 85 -
.../hadoop/fs/HadoopFileSystemFactory.java | 52 -
.../fs/IgniteHadoopFileSystemCounterWriter.java | 103 -
.../fs/IgniteHadoopIgfsSecondaryFileSystem.java | 580 -
.../fs/KerberosHadoopFileSystemFactory.java | 217 -
.../apache/ignite/hadoop/fs/package-info.java | 22 -
.../hadoop/fs/v1/IgniteHadoopFileSystem.java | 1364 --
.../ignite/hadoop/fs/v1/package-info.java | 22 -
.../hadoop/fs/v2/IgniteHadoopFileSystem.java | 1076 --
.../ignite/hadoop/fs/v2/package-info.java | 22 -
.../IgniteHadoopClientProtocolProvider.java | 144 -
.../mapreduce/IgniteHadoopMapReducePlanner.java | 416 -
.../IgniteHadoopWeightedMapReducePlanner.java | 846 --
.../ignite/hadoop/mapreduce/package-info.java | 22 -
.../org/apache/ignite/hadoop/package-info.java | 22 -
.../ignite/hadoop/util/BasicUserNameMapper.java | 112 -
.../hadoop/util/ChainedUserNameMapper.java | 94 -
.../hadoop/util/KerberosUserNameMapper.java | 137 -
.../ignite/hadoop/util/UserNameMapper.java | 37 -
.../apache/ignite/hadoop/util/package-info.java | 22 -
.../processors/hadoop/HadoopAttributes.java | 168 -
.../processors/hadoop/HadoopClassLoader.java | 363 -
.../hadoop/HadoopClassLoaderUtils.java | 684 -
.../processors/hadoop/HadoopComponent.java | 62 -
.../processors/hadoop/HadoopContext.java | 201 -
.../processors/hadoop/HadoopDefaultJobInfo.java | 156 -
.../internal/processors/hadoop/HadoopImpl.java | 134 -
.../hadoop/HadoopMapReduceCounterGroup.java | 123 -
.../hadoop/HadoopMapReduceCounters.java | 228 -
.../processors/hadoop/HadoopProcessor.java | 223 -
.../internal/processors/hadoop/HadoopSetup.java | 541 -
.../hadoop/HadoopTaskCancelledException.java | 35 -
.../internal/processors/hadoop/HadoopUtils.java | 443 -
.../hadoop/counter/HadoopCounterAdapter.java | 129 -
.../hadoop/counter/HadoopCountersImpl.java | 200 -
.../hadoop/counter/HadoopLongCounter.java | 93 -
.../counter/HadoopPerformanceCounter.java | 288 -
.../hadoop/fs/HadoopFileSystemCacheUtils.java | 242 -
.../hadoop/fs/HadoopFileSystemsUtils.java | 51 -
.../hadoop/fs/HadoopLazyConcurrentMap.java | 212 -
.../hadoop/fs/HadoopLocalFileSystemV1.java | 39 -
.../hadoop/fs/HadoopLocalFileSystemV2.java | 88 -
.../processors/hadoop/fs/HadoopParameters.java | 94 -
.../hadoop/fs/HadoopRawLocalFileSystem.java | 314 -
.../processors/hadoop/igfs/HadoopIgfs.java | 202 -
.../igfs/HadoopIgfsCommunicationException.java | 57 -
.../hadoop/igfs/HadoopIgfsEndpoint.java | 210 -
.../processors/hadoop/igfs/HadoopIgfsEx.java | 93 -
.../hadoop/igfs/HadoopIgfsFuture.java | 97 -
.../hadoop/igfs/HadoopIgfsInProc.java | 510 -
.../hadoop/igfs/HadoopIgfsInputStream.java | 629 -
.../processors/hadoop/igfs/HadoopIgfsIo.java | 76 -
.../processors/hadoop/igfs/HadoopIgfsIpcIo.java | 624 -
.../hadoop/igfs/HadoopIgfsIpcIoListener.java | 36 -
.../hadoop/igfs/HadoopIgfsJclLogger.java | 116 -
.../hadoop/igfs/HadoopIgfsOutProc.java | 524 -
.../hadoop/igfs/HadoopIgfsOutputStream.java | 201 -
.../hadoop/igfs/HadoopIgfsProperties.java | 86 -
.../hadoop/igfs/HadoopIgfsProxyInputStream.java | 337 -
.../igfs/HadoopIgfsProxyOutputStream.java | 165 -
...fsSecondaryFileSystemPositionedReadable.java | 105 -
.../hadoop/igfs/HadoopIgfsStreamDelegate.java | 96 -
.../igfs/HadoopIgfsStreamEventListener.java | 39 -
.../processors/hadoop/igfs/HadoopIgfsUtils.java | 174 -
.../hadoop/igfs/HadoopIgfsWrapper.java | 552 -
.../hadoop/jobtracker/HadoopJobMetadata.java | 316 -
.../hadoop/jobtracker/HadoopJobTracker.java | 1706 ---
.../hadoop/message/HadoopMessage.java | 27 -
.../planner/HadoopAbstractMapReducePlanner.java | 116 -
.../planner/HadoopDefaultMapReducePlan.java | 109 -
.../planner/HadoopMapReducePlanGroup.java | 150 -
.../planner/HadoopMapReducePlanTopology.java | 89 -
.../hadoop/proto/HadoopClientProtocol.java | 349 -
.../proto/HadoopProtocolJobCountersTask.java | 46 -
.../proto/HadoopProtocolJobStatusTask.java | 82 -
.../hadoop/proto/HadoopProtocolKillJobTask.java | 46 -
.../proto/HadoopProtocolNextTaskIdTask.java | 36 -
.../proto/HadoopProtocolSubmitJobTask.java | 59 -
.../hadoop/proto/HadoopProtocolTaskAdapter.java | 120 -
.../proto/HadoopProtocolTaskArguments.java | 84 -
.../hadoop/shuffle/HadoopShuffle.java | 263 -
.../hadoop/shuffle/HadoopShuffleAck.java | 92 -
.../hadoop/shuffle/HadoopShuffleJob.java | 612 -
.../hadoop/shuffle/HadoopShuffleMessage.java | 242 -
.../HadoopConcurrentHashMultimap.java | 616 -
.../shuffle/collections/HadoopHashMultimap.java | 176 -
.../collections/HadoopHashMultimapBase.java | 211 -
.../shuffle/collections/HadoopMultimap.java | 113 -
.../shuffle/collections/HadoopMultimapBase.java | 435 -
.../shuffle/collections/HadoopSkipList.java | 733 -
.../shuffle/streams/HadoopDataInStream.java | 171 -
.../shuffle/streams/HadoopDataOutStream.java | 130 -
.../shuffle/streams/HadoopOffheapBuffer.java | 122 -
.../HadoopEmbeddedTaskExecutor.java | 153 -
.../taskexecutor/HadoopExecutorService.java | 234 -
.../hadoop/taskexecutor/HadoopRunnableTask.java | 293 -
.../taskexecutor/HadoopTaskExecutorAdapter.java | 59 -
.../hadoop/taskexecutor/HadoopTaskState.java | 38 -
.../hadoop/taskexecutor/HadoopTaskStatus.java | 116 -
.../external/HadoopExternalTaskExecutor.java | 976 --
.../external/HadoopExternalTaskMetadata.java | 67 -
.../external/HadoopJobInfoUpdateRequest.java | 113 -
.../external/HadoopPrepareForJobRequest.java | 130 -
.../external/HadoopProcessDescriptor.java | 149 -
.../external/HadoopProcessStartedAck.java | 47 -
.../external/HadoopTaskExecutionRequest.java | 114 -
.../external/HadoopTaskFinishedMessage.java | 94 -
.../child/HadoopChildProcessRunner.java | 459 -
.../child/HadoopExternalProcessStarter.java | 301 -
.../HadoopAbstractCommunicationClient.java | 96 -
.../HadoopCommunicationClient.java | 72 -
.../HadoopExternalCommunication.java | 1460 --
.../HadoopHandshakeTimeoutException.java | 42 -
.../communication/HadoopIpcToNioAdapter.java | 248 -
.../communication/HadoopMarshallerFilter.java | 86 -
.../communication/HadoopMessageListener.java | 39 -
.../HadoopTcpNioCommunicationClient.java | 93 -
.../hadoop/v1/HadoopV1CleanupTask.java | 64 -
.../processors/hadoop/v1/HadoopV1Counter.java | 106 -
.../processors/hadoop/v1/HadoopV1MapTask.java | 122 -
.../hadoop/v1/HadoopV1OutputCollector.java | 137 -
.../hadoop/v1/HadoopV1Partitioner.java | 44 -
.../hadoop/v1/HadoopV1ReduceTask.java | 101 -
.../processors/hadoop/v1/HadoopV1Reporter.java | 81 -
.../processors/hadoop/v1/HadoopV1SetupTask.java | 56 -
.../processors/hadoop/v1/HadoopV1Splitter.java | 102 -
.../processors/hadoop/v1/HadoopV1Task.java | 97 -
.../processors/hadoop/v2/HadoopDaemon.java | 126 -
.../hadoop/v2/HadoopExternalSplit.java | 89 -
.../hadoop/v2/HadoopSerializationWrapper.java | 138 -
.../hadoop/v2/HadoopShutdownHookManager.java | 98 -
.../hadoop/v2/HadoopSplitWrapper.java | 119 -
.../hadoop/v2/HadoopV2CleanupTask.java | 72 -
.../processors/hadoop/v2/HadoopV2Context.java | 243 -
.../processors/hadoop/v2/HadoopV2Counter.java | 88 -
.../processors/hadoop/v2/HadoopV2Job.java | 445 -
.../hadoop/v2/HadoopV2JobResourceManager.java | 323 -
.../processors/hadoop/v2/HadoopV2MapTask.java | 99 -
.../hadoop/v2/HadoopV2Partitioner.java | 44 -
.../hadoop/v2/HadoopV2ReduceTask.java | 91 -
.../processors/hadoop/v2/HadoopV2SetupTask.java | 65 -
.../processors/hadoop/v2/HadoopV2Splitter.java | 111 -
.../processors/hadoop/v2/HadoopV2Task.java | 185 -
.../hadoop/v2/HadoopV2TaskContext.java | 560 -
.../hadoop/v2/HadoopWritableSerialization.java | 75 -
...op.mapreduce.protocol.ClientProtocolProvider | 1 -
.../HadoopClientProtocolEmbeddedSelfTest.java | 35 -
.../hadoop/HadoopClientProtocolSelfTest.java | 654 -
.../hadoop/cache/HadoopTxConfigCacheTest.java | 42 -
...KerberosHadoopFileSystemFactorySelfTest.java | 121 -
.../util/BasicUserNameMapperSelfTest.java | 133 -
.../util/ChainedUserNameMapperSelfTest.java | 107 -
.../util/KerberosUserNameMapperSelfTest.java | 99 -
.../ignite/igfs/Hadoop1DualAbstractTest.java | 158 -
.../igfs/Hadoop1OverIgfsDualAsyncTest.java | 30 -
.../igfs/Hadoop1OverIgfsDualSyncTest.java | 30 -
.../igfs/HadoopFIleSystemFactorySelfTest.java | 317 -
.../HadoopIgfs20FileSystemAbstractSelfTest.java | 2040 ---
...Igfs20FileSystemLoopbackPrimarySelfTest.java | 74 -
...oopIgfs20FileSystemShmemPrimarySelfTest.java | 74 -
.../igfs/HadoopIgfsDualAbstractSelfTest.java | 321 -
.../igfs/HadoopIgfsDualAsyncSelfTest.java | 32 -
.../ignite/igfs/HadoopIgfsDualSyncSelfTest.java | 32 -
...adoopIgfsSecondaryFileSystemTestAdapter.java | 149 -
...oopSecondaryFileSystemConfigurationTest.java | 575 -
.../apache/ignite/igfs/IgfsEventsTestSuite.java | 285 -
.../igfs/IgfsNearOnlyMultiNodeSelfTest.java | 223 -
.../IgniteHadoopFileSystemAbstractSelfTest.java | 2432 ---
.../IgniteHadoopFileSystemClientSelfTest.java | 212 -
...IgniteHadoopFileSystemHandshakeSelfTest.java | 389 -
.../IgniteHadoopFileSystemIpcCacheSelfTest.java | 214 -
.../IgniteHadoopFileSystemLoggerSelfTest.java | 298 -
...niteHadoopFileSystemLoggerStateSelfTest.java | 329 -
...adoopFileSystemLoopbackAbstractSelfTest.java | 46 -
...SystemLoopbackEmbeddedDualAsyncSelfTest.java | 33 -
...eSystemLoopbackEmbeddedDualSyncSelfTest.java | 33 -
...leSystemLoopbackEmbeddedPrimarySelfTest.java | 33 -
...SystemLoopbackEmbeddedSecondarySelfTest.java | 34 -
...SystemLoopbackExternalDualAsyncSelfTest.java | 33 -
...eSystemLoopbackExternalDualSyncSelfTest.java | 33 -
...leSystemLoopbackExternalPrimarySelfTest.java | 33 -
...SystemLoopbackExternalSecondarySelfTest.java | 34 -
...condaryFileSystemInitializationSelfTest.java | 214 -
...teHadoopFileSystemShmemAbstractSelfTest.java | 91 -
...ileSystemShmemEmbeddedDualAsyncSelfTest.java | 33 -
...FileSystemShmemEmbeddedDualSyncSelfTest.java | 33 -
...pFileSystemShmemEmbeddedPrimarySelfTest.java | 33 -
...ileSystemShmemEmbeddedSecondarySelfTest.java | 33 -
...ileSystemShmemExternalDualAsyncSelfTest.java | 33 -
...FileSystemShmemExternalDualSyncSelfTest.java | 33 -
...pFileSystemShmemExternalPrimarySelfTest.java | 33 -
...ileSystemShmemExternalSecondarySelfTest.java | 33 -
.../hadoop/HadoopAbstractMapReduceTest.java | 429 -
.../hadoop/HadoopAbstractSelfTest.java | 239 -
.../hadoop/HadoopAbstractWordCountTest.java | 175 -
.../hadoop/HadoopClassLoaderTest.java | 110 -
.../hadoop/HadoopCommandLineTest.java | 474 -
.../HadoopDefaultMapReducePlannerSelfTest.java | 615 -
.../processors/hadoop/HadoopErrorSimulator.java | 326 -
.../hadoop/HadoopFileSystemsTest.java | 155 -
.../processors/hadoop/HadoopGroupingTest.java | 307 -
.../hadoop/HadoopJobTrackerSelfTest.java | 345 -
.../hadoop/HadoopMapReduceEmbeddedSelfTest.java | 253 -
.../HadoopMapReduceErrorResilienceTest.java | 154 -
.../processors/hadoop/HadoopMapReduceTest.java | 66 -
.../hadoop/HadoopNoHadoopMapReduceTest.java | 47 -
.../processors/hadoop/HadoopPlannerMockJob.java | 168 -
.../hadoop/HadoopPopularWordsTest.java | 298 -
.../HadoopSerializationWrapperSelfTest.java | 79 -
.../processors/hadoop/HadoopSharedMap.java | 66 -
.../hadoop/HadoopSnappyFullMapReduceTest.java | 36 -
.../processors/hadoop/HadoopSnappyTest.java | 102 -
.../hadoop/HadoopSortingExternalTest.java | 46 -
.../processors/hadoop/HadoopSortingTest.java | 303 -
.../hadoop/HadoopSplitWrapperSelfTest.java | 72 -
.../processors/hadoop/HadoopStartup.java | 54 -
.../hadoop/HadoopTaskExecutionSelfTest.java | 567 -
.../hadoop/HadoopTasksAllVersionsTest.java | 260 -
.../processors/hadoop/HadoopTasksV1Test.java | 58 -
.../processors/hadoop/HadoopTasksV2Test.java | 77 -
.../hadoop/HadoopTestRoundRobinMrPlanner.java | 71 -
.../hadoop/HadoopTestTaskContext.java | 228 -
.../processors/hadoop/HadoopTestUtils.java | 178 -
.../hadoop/HadoopUserLibsSelfTest.java | 260 -
.../processors/hadoop/HadoopV2JobSelfTest.java | 100 -
.../hadoop/HadoopValidationSelfTest.java | 53 -
.../HadoopWeightedMapReducePlannerTest.java | 599 -
.../HadoopWeightedPlannerMapReduceTest.java | 38 -
.../hadoop/books/alice-in-wonderland.txt | 3735 -----
.../processors/hadoop/books/art-of-war.txt | 6982 ---------
.../hadoop/books/huckleberry-finn.txt | 11733 ---------------
.../processors/hadoop/books/sherlock-holmes.txt | 13052 -----------------
.../processors/hadoop/books/tom-sawyer.txt | 8858 -----------
.../hadoop/deps/CircularWIthHadoop.java | 32 -
.../hadoop/deps/CircularWithoutHadoop.java | 27 -
.../processors/hadoop/deps/WithCast.java | 41 -
.../hadoop/deps/WithClassAnnotation.java | 28 -
.../hadoop/deps/WithConstructorInvocation.java | 31 -
.../processors/hadoop/deps/WithExtends.java | 27 -
.../processors/hadoop/deps/WithField.java | 29 -
.../processors/hadoop/deps/WithImplements.java | 36 -
.../hadoop/deps/WithIndirectField.java | 27 -
.../processors/hadoop/deps/WithInitializer.java | 33 -
.../processors/hadoop/deps/WithInnerClass.java | 31 -
.../hadoop/deps/WithLocalVariable.java | 38 -
.../hadoop/deps/WithMethodAnnotation.java | 32 -
.../hadoop/deps/WithMethodArgument.java | 31 -
.../hadoop/deps/WithMethodCheckedException.java | 31 -
.../hadoop/deps/WithMethodInvocation.java | 31 -
.../hadoop/deps/WithMethodReturnType.java | 31 -
.../hadoop/deps/WithMethodRuntimeException.java | 31 -
.../processors/hadoop/deps/WithOuterClass.java | 38 -
.../hadoop/deps/WithParameterAnnotation.java | 31 -
.../processors/hadoop/deps/WithStaticField.java | 29 -
.../hadoop/deps/WithStaticInitializer.java | 34 -
.../processors/hadoop/deps/Without.java | 25 -
.../hadoop/examples/HadoopWordCount1.java | 94 -
.../hadoop/examples/HadoopWordCount1Map.java | 79 -
.../hadoop/examples/HadoopWordCount1Reduce.java | 61 -
.../hadoop/examples/HadoopWordCount2.java | 111 -
.../examples/HadoopWordCount2Combiner.java | 45 -
.../hadoop/examples/HadoopWordCount2Mapper.java | 88 -
.../examples/HadoopWordCount2Reducer.java | 113 -
.../collections/HadoopAbstractMapTest.java | 174 -
.../HadoopConcurrentHashMultimapSelftest.java | 278 -
.../collections/HadoopHashMapSelfTest.java | 131 -
.../collections/HadoopSkipListSelfTest.java | 318 -
.../streams/HadoopDataStreamSelfTest.java | 150 -
.../taskexecutor/HadoopExecutorServiceTest.java | 118 -
.../HadoopExternalTaskExecutionSelfTest.java | 232 -
.../HadoopExternalCommunicationSelfTest.java | 220 -
.../testsuites/IgniteHadoopTestSuite.java | 354 -
.../IgniteIgfsLinuxAndMacOSTestSuite.java | 72 -
.../src/main/js/app/data/pom-dependencies.json | 1 +
.../configuration/generator/Pom.service.js | 4 +-
pom.xml | 1 +
564 files changed, 98339 insertions(+), 97779 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/assembly/dependencies-fabric-lgpl.xml
----------------------------------------------------------------------
diff --git a/assembly/dependencies-fabric-lgpl.xml b/assembly/dependencies-fabric-lgpl.xml
index 2b4cf62..0eef736 100644
--- a/assembly/dependencies-fabric-lgpl.xml
+++ b/assembly/dependencies-fabric-lgpl.xml
@@ -126,6 +126,7 @@
<exclude>org.apache.ignite:ignite-visor-console_2.10</exclude>
<exclude>org.apache.ignite:ignite-visor-plugins</exclude>
<exclude>org.apache.ignite:ignite-hadoop</exclude>
+ <exclude>org.apache.ignite:ignite-hadoop-impl</exclude>
<exclude>org.apache.ignite:ignite-schema-import</exclude>
<exclude>org.apache.ignite:ignite-schema-import-db</exclude>
<exclude>org.apache.ignite:ignite-codegen</exclude>
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/assembly/dependencies-fabric.xml
----------------------------------------------------------------------
diff --git a/assembly/dependencies-fabric.xml b/assembly/dependencies-fabric.xml
index ff4075a..44a234e 100644
--- a/assembly/dependencies-fabric.xml
+++ b/assembly/dependencies-fabric.xml
@@ -126,6 +126,7 @@
<exclude>org.apache.ignite:ignite-visor-console_2.10</exclude>
<exclude>org.apache.ignite:ignite-visor-plugins</exclude>
<exclude>org.apache.ignite:ignite-hadoop</exclude>
+ <exclude>org.apache.ignite:ignite-hadoop-impl</exclude>
<exclude>org.apache.ignite:ignite-schema-import</exclude>
<exclude>org.apache.ignite:ignite-schema-import-db</exclude>
<exclude>org.apache.ignite:ignite-codegen</exclude>
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/assembly/dependencies-hadoop.xml
----------------------------------------------------------------------
diff --git a/assembly/dependencies-hadoop.xml b/assembly/dependencies-hadoop.xml
index 38646ba..ef0a3ce 100644
--- a/assembly/dependencies-hadoop.xml
+++ b/assembly/dependencies-hadoop.xml
@@ -113,6 +113,7 @@
<moduleSet>
<includes>
<include>org.apache.ignite:ignite-hadoop</include>
+ <include>org.apache.ignite:ignite-hadoop-impl</include>
</includes>
<sources>
<includeModuleDirectory>true</includeModuleDirectory>
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/assembly/libs/README.txt
----------------------------------------------------------------------
diff --git a/assembly/libs/README.txt b/assembly/libs/README.txt
index 38d8dbd..2fa3e7c 100644
--- a/assembly/libs/README.txt
+++ b/assembly/libs/README.txt
@@ -79,7 +79,8 @@ The following modules are available:
- ignite-log4j (for Log4j logging)
- ignite-jcl (for Apache Commons logging)
- ignite-jta (for XA integration)
-- ignite-hadoop (for Apache Hadoop Accelerator)
+- ignite-hadoop (for Apache Hadoop Accelerator interfaces)
+- ignite-hadoop-impl (for Apache Hadoop Accelerator implementation)
- ignite-rest-http (for HTTP REST messages)
- ignite-scalar (for ignite Scala API)
- ignite-slf4j (for SLF4J logging)
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
index 76e495f..b182bd8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
@@ -38,7 +38,7 @@ public enum IgniteComponentType {
HADOOP(
"org.apache.ignite.internal.processors.hadoop.HadoopNoopProcessor",
"org.apache.ignite.internal.processors.hadoop.HadoopProcessor",
- "ignite-hadoop"
+ "ignite-hadoop-impl"
),
/** IGFS helper component. */
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
index 501870a..9f388fe 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
@@ -79,8 +79,9 @@ public class HadoopNoopProcessor extends HadoopProcessorAdapter {
* Creates an exception to be uniformly thrown from all the methods.
*/
private IllegalStateException createException() {
- return new IllegalStateException("Hadoop module is not loaded (please ensure that ignite-hadoop.jar is in " +
- "classpath and IgniteConfiguration.peerClassLoadingEnabled is set to false).");
+ return new IllegalStateException("Hadoop module is not loaded (please ensure that ignite-hadoop.jar and " +
+ "ignite-hadoop-impl.jar are in libs and IgniteConfiguration.peerClassLoadingEnabled is set to " +
+ "false).");
}
/** {@inheritDoc} */
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/README.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/README.txt b/modules/hadoop-impl/README.txt
new file mode 100644
index 0000000..ecd47e0
--- /dev/null
+++ b/modules/hadoop-impl/README.txt
@@ -0,0 +1,33 @@
+Apache Ignite Hadoop Module
+---------------------------
+
+The Apache Ignite Hadoop module provides an In-Memory MapReduce engine and a driver to use IGFS as a
+Hadoop file system, both 100% compatible with HDFS and YARN.
+
+To enable the Hadoop module when starting a standalone node, move the 'optional/ignite-hadoop-impl'
+folder to the 'libs' folder before running the 'ignite.{sh|bat}' script. The contents of the module
+folder will then be added to the classpath.
+
+Importing Hadoop Module In Maven Project
+----------------------------------------
+
+If you are using Maven to manage dependencies of your project, you can add Hadoop module
+dependency like this (replace '${ignite.version}' with actual Ignite version you are
+interested in):
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+ http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ ...
+ <dependencies>
+ ...
+ <dependency>
+ <groupId>org.apache.ignite</groupId>
+ <artifactId>ignite-hadoop-impl</artifactId>
+ <version>${ignite.version}</version>
+ </dependency>
+ ...
+ </dependencies>
+ ...
+</project>
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/config/core-site.ignite.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/config/core-site.ignite.xml b/modules/hadoop-impl/config/core-site.ignite.xml
new file mode 100644
index 0000000..8b8e634
--- /dev/null
+++ b/modules/hadoop-impl/config/core-site.ignite.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+ This template file contains settings needed to run Apache Hadoop jobs
+ with Apache Ignite's distributed in-memory file system IGFS.
+
+ You can replace '$HADOOP_HOME/etc/hadoop/core-site.xml' file with this one
+ to work with IGFS nodes running on localhost (these local nodes can be
+ a part of distributed cluster though). To work with file system on remote
+ hosts you need to change the host of file system URI to any host running
+ IGFS node.
+
+ Note that Ignite jars must be in Apache Hadoop client classpath to work
+ with this configuration.
+
+ Run script '$IGNITE_HOME/bin/setup-hadoop.{sh|bat}' for Apache Hadoop client setup.
+-->
+
+<configuration>
+ <!--
+ Set default file system to IGFS instance named "igfs" configured in Ignite.
+ -->
+ <property>
+ <name>fs.default.name</name>
+ <value>igfs://igfs@localhost</value>
+ </property>
+
+ <!--
+ Set Hadoop 1.* file system implementation class for IGFS.
+ -->
+ <property>
+ <name>fs.igfs.impl</name>
+ <value>org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem</value>
+ </property>
+
+ <!--
+ Set Hadoop 2.* file system implementation class for IGFS.
+ -->
+ <property>
+ <name>fs.AbstractFileSystem.igfs.impl</name>
+ <value>org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem</value>
+ </property>
+
+ <!--
+ Disallow data node replacement since it does not make sense for IGFS nodes.
+ -->
+ <property>
+ <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
+ <value>NEVER</value>
+ </property>
+
+ <!--
+ Allow to write the job statistics into IGFS.
+ -->
+ <!--
+ <property>
+ <name>ignite.counters.writer</name>
+ <value>org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter</value>
+ </property>
+ -->
+
+ <!--
+ By default, data is placed into the file /user/<user_name>/<job_id>/performance.
+ You can override this path; the ${USER} macro is substituted with the name of the submitting user.
+ -->
+ <!--
+ <property>
+ <name>ignite.counters.fswriter.directory</name>
+ <value>/user/${USER}</value>
+ </property>
+ -->
+</configuration>
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/config/hive-site.ignite.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/config/hive-site.ignite.xml b/modules/hadoop-impl/config/hive-site.ignite.xml
new file mode 100644
index 0000000..f278aab
--- /dev/null
+++ b/modules/hadoop-impl/config/hive-site.ignite.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+ This template file contains settings needed to run Apache Hive queries
+ with Ignite In-Memory Accelerator.
+
+ You can replace '$HIVE_HOME/conf/hive-site.xml' file with this one or
+ run script '$IGNITE_HOME/bin/setup-hadoop.{sh|bat}' for Apache Hadoop
+ and Hive client setup.
+-->
+<configuration>
+ <!--
+ Ignite requires query plan to be passed not using local resource.
+ -->
+ <property>
+ <name>hive.rpc.query.plan</name>
+ <value>true</value>
+ </property>
+</configuration>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/config/mapred-site.ignite.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/config/mapred-site.ignite.xml b/modules/hadoop-impl/config/mapred-site.ignite.xml
new file mode 100644
index 0000000..a2ed437
--- /dev/null
+++ b/modules/hadoop-impl/config/mapred-site.ignite.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+ This template file contains settings needed to run Apache Hadoop jobs
+ with Apache Ignite In-Memory Accelerator.
+
+ You can replace '$HADOOP_HOME/etc/hadoop/mapred-site.xml' file with this one
+ to run jobs on localhost (local node can be a part of distributed cluster though).
+ To run jobs on a remote host you have to change the job tracker address to the REST address
+ of any running Ignite node.
+
+ Note that Ignite jars must be in Apache Hadoop client classpath to work
+ with this configuration.
+
+ Run script '$IGNITE_HOME/bin/setup-hadoop.{sh|bat}' for Apache Hadoop client setup.
+-->
+
+<configuration>
+ <!--
+ Framework name must be set to 'ignite'.
+ -->
+ <property>
+ <name>mapreduce.framework.name</name>
+ <value>ignite</value>
+ </property>
+
+ <!--
+ Job tracker address must be set to the REST address of any running Ignite node.
+ -->
+ <property>
+ <name>mapreduce.jobtracker.address</name>
+ <value>localhost:11211</value>
+ </property>
+
+ <!-- Parameters for job tuning. -->
+ <!--
+ <property>
+ <name>mapreduce.job.reduces</name>
+ <value>1</value>
+ </property>
+
+ <property>
+ <name>mapreduce.job.maps</name>
+ <value>4</value>
+ </property>
+ -->
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/licenses/apache-2.0.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/licenses/apache-2.0.txt b/modules/hadoop-impl/licenses/apache-2.0.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/modules/hadoop-impl/licenses/apache-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/pom.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/pom.xml b/modules/hadoop-impl/pom.xml
new file mode 100644
index 0000000..5b25cda
--- /dev/null
+++ b/modules/hadoop-impl/pom.xml
@@ -0,0 +1,158 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+ POM file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.ignite</groupId>
+ <artifactId>ignite-parent</artifactId>
+ <version>1</version>
+ <relativePath>../../parent</relativePath>
+ </parent>
+
+ <artifactId>ignite-hadoop-impl</artifactId>
+ <version>1.7.0-SNAPSHOT</version>
+ <url>http://ignite.apache.org</url>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.ignite</groupId>
+ <artifactId>ignite-core</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.ignite</groupId>
+ <artifactId>ignite-hadoop</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.ow2.asm</groupId>
+ <artifactId>asm-all</artifactId>
+ <version>4.2</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-annotations</artifactId>
+ <version>${hadoop.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-auth</artifactId>
+ <version>${hadoop.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>${hadoop.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ <version>${hadoop.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ <version>${hadoop.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.gridgain</groupId>
+ <artifactId>ignite-shmem</artifactId>
+ <scope>test</scope>
+ <version>1.0.0</version>
+ </dependency>
+
+ <dependency>
+ <groupId>commons-beanutils</groupId>
+ <artifactId>commons-beanutils</artifactId>
+ <version>${commons.beanutils.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.ignite</groupId>
+ <artifactId>ignite-spring</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.ignite</groupId>
+ <artifactId>ignite-core</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.ignite</groupId>
+ <artifactId>ignite-hadoop</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.2</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.17</version>
+ <configuration>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
new file mode 100644
index 0000000..a01bfaf
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.hadoop.util.KerberosUserNameMapper;
+import org.apache.ignite.hadoop.util.UserNameMapper;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleAware;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.Arrays;
+
+/**
+ * Simple Hadoop file system factory which delegates to {@code FileSystem.get()} on each call.
+ * <p>
+ * If {@code "fs.[prefix].impl.disable.cache"} is set to {@code true}, file system instances will be cached by Hadoop.
+ */
+public class BasicHadoopFileSystemFactory implements HadoopFileSystemFactory, Externalizable, LifecycleAware {
+ /** */
+ private static final long serialVersionUID = 0L;
+
+ /** File system URI. */
+ private String uri;
+
+ /** File system config paths. */
+ private String[] cfgPaths;
+
+ /** User name mapper. */
+ private UserNameMapper usrNameMapper;
+
+ /** Configuration of the secondary filesystem, never null. */
+ protected transient Configuration cfg;
+
+ /** Resulting URI. */
+ protected transient URI fullUri;
+
+ /**
+ * Constructor.
+ */
+ public BasicHadoopFileSystemFactory() {
+ // No-op.
+ }
+
+ /** {@inheritDoc} */
+ @Override public final FileSystem get(String name) throws IOException {
+ String name0 = IgfsUtils.fixUserName(name);
+
+ if (usrNameMapper != null)
+ name0 = IgfsUtils.fixUserName(usrNameMapper.map(name0));
+
+ return getWithMappedName(name0);
+ }
+
+ /**
+ * Internal file system create routine.
+ *
+ * @param usrName User name.
+ * @return File system.
+ * @throws IOException If failed.
+ */
+ protected FileSystem getWithMappedName(String usrName) throws IOException {
+ assert cfg != null;
+
+ try {
+ // FileSystem.get() might delegate to ServiceLoader to get the list of file system implementation.
+ // And ServiceLoader is known to be sensitive to context classloader. Therefore, we change context
+ // classloader to classloader of current class to avoid strange class-cast-exceptions.
+ ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
+
+ try {
+ return create(usrName);
+ }
+ finally {
+ HadoopUtils.restoreContextClassLoader(oldLdr);
+ }
+ }
+ catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+
+ throw new IOException("Failed to create file system due to interrupt.", e);
+ }
+ }
+
+ /**
+ * Internal file system creation routine, invoked in correct class loader context.
+ *
+ * @param usrName User name.
+ * @return File system.
+ * @throws IOException If failed.
+ * @throws InterruptedException if the current thread is interrupted.
+ */
+ protected FileSystem create(String usrName) throws IOException, InterruptedException {
+ return FileSystem.get(fullUri, cfg, usrName);
+ }
+
+ /**
+ * Gets file system URI.
+ * <p>
+ * This URI will be used as a first argument when calling {@link FileSystem#get(URI, Configuration, String)}.
+ * <p>
+ * If not set, default URI will be picked from file system configuration using
+ * {@link FileSystem#getDefaultUri(Configuration)} method.
+ *
+ * @return File system URI.
+ */
+ @Nullable public String getUri() {
+ return uri;
+ }
+
+ /**
+ * Sets file system URI. See {@link #getUri()} for more information.
+ *
+ * @param uri File system URI.
+ */
+ public void setUri(@Nullable String uri) {
+ this.uri = uri;
+ }
+
+ /**
+ * Gets paths to additional file system configuration files (e.g. core-site.xml).
+ * <p>
+ * Path could be either absolute or relative to {@code IGNITE_HOME} environment variable.
+ * <p>
+ * All provided paths will be loaded in the order they provided and then applied to {@link Configuration}. It means
+ * that path order might be important in some cases.
+ * <p>
+ * <b>NOTE!</b> Factory can be serialized and transferred to other machines where instance of
+ * {@link IgniteHadoopFileSystem} resides. Corresponding paths must exist on these machines as well.
+ *
+ * @return Paths to file system configuration files.
+ */
+ @Nullable public String[] getConfigPaths() {
+ return cfgPaths;
+ }
+
+ /**
+ * Set paths to additional file system configuration files (e.g. core-site.xml). See {@link #getConfigPaths()} for
+ * more information.
+ *
+ * @param cfgPaths Paths to file system configuration files.
+ */
+ public void setConfigPaths(@Nullable String... cfgPaths) {
+ this.cfgPaths = cfgPaths;
+ }
+
+ /**
+ * Get optional user name mapper.
+ * <p>
+ * When IGFS is invoked from Hadoop, user name is passed along the way to ensure that request will be performed
+ * with proper user context. User name is passed in a simple form and doesn't contain any extended information,
+ * such as host, domain or Kerberos realm. You may use name mapper to translate plain user name to full user
+ * name required by security engine of the underlying file system.
+ * <p>
+ * For example you may want to use {@link KerberosUserNameMapper} to user name from {@code "johndoe"} to
+ * {@code "johndoe@YOUR.REALM.COM"}.
+ *
+ * @return User name mapper.
+ */
+ @Nullable public UserNameMapper getUserNameMapper() {
+ return usrNameMapper;
+ }
+
+ /**
+ * Set optional user name mapper. See {@link #getUserNameMapper()} for more information.
+ *
+ * @param usrNameMapper User name mapper.
+ */
+ public void setUserNameMapper(@Nullable UserNameMapper usrNameMapper) {
+ this.usrNameMapper = usrNameMapper;
+ }
+
+ /** {@inheritDoc} */
+ @Override public void start() throws IgniteException {
+ cfg = HadoopUtils.safeCreateConfiguration();
+
+ if (cfgPaths != null) {
+ for (String cfgPath : cfgPaths) {
+ if (cfgPath == null)
+ throw new NullPointerException("Configuration path cannot be null: " + Arrays.toString(cfgPaths));
+ else {
+ URL url = U.resolveIgniteUrl(cfgPath);
+
+ if (url == null) {
+ // If secConfPath is given, it should be resolvable:
+ throw new IgniteException("Failed to resolve secondary file system configuration path " +
+ "(ensure that it exists locally and you have read access to it): " + cfgPath);
+ }
+
+ cfg.addResource(url);
+ }
+ }
+ }
+
+ // If secondary fs URI is not given explicitly, try to get it from the configuration:
+ if (uri == null)
+ fullUri = FileSystem.getDefaultUri(cfg);
+ else {
+ try {
+ fullUri = new URI(uri);
+ }
+ catch (URISyntaxException use) {
+ throw new IgniteException("Failed to resolve secondary file system URI: " + uri);
+ }
+ }
+
+ if (usrNameMapper != null && usrNameMapper instanceof LifecycleAware)
+ ((LifecycleAware)usrNameMapper).start();
+ }
+
+ /** {@inheritDoc} */
+ @Override public void stop() throws IgniteException {
+ if (usrNameMapper != null && usrNameMapper instanceof LifecycleAware)
+ ((LifecycleAware)usrNameMapper).stop();
+ }
+
+ /** {@inheritDoc} */
+ @Override public void writeExternal(ObjectOutput out) throws IOException {
+ U.writeString(out, uri);
+
+ if (cfgPaths != null) {
+ out.writeInt(cfgPaths.length);
+
+ for (String cfgPath : cfgPaths)
+ U.writeString(out, cfgPath);
+ }
+ else
+ out.writeInt(-1);
+
+ out.writeObject(usrNameMapper);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ uri = U.readString(in);
+
+ int cfgPathsCnt = in.readInt();
+
+ if (cfgPathsCnt != -1) {
+ cfgPaths = new String[cfgPathsCnt];
+
+ for (int i = 0; i < cfgPathsCnt; i++)
+ cfgPaths[i] = U.readString(in);
+ }
+
+ usrNameMapper = (UserNameMapper)in.readObject();
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
new file mode 100644
index 0000000..bcbb082
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Caching Hadoop file system factory. Caches {@link FileSystem} instances on per-user basis. Doesn't rely on
+ * built-in Hadoop {@code FileSystem} caching mechanics. Separate {@code FileSystem} instance is created for each
+ * user instead.
+ * <p>
+ * This makes cache instance resistant to concurrent calls to {@link FileSystem#close()} in other parts of the user
+ * code. On the other hand, this might cause problems on some environments. E.g. if Kerberos is enabled, a call to
+ * {@link FileSystem#get(URI, Configuration, String)} will refresh Kerberos token. But this factory implementation
+ * calls this method only once per user what may lead to token expiration. In such cases it makes sense to either
+ * use {@link BasicHadoopFileSystemFactory} or implement your own factory.
+ */
+public class CachingHadoopFileSystemFactory extends BasicHadoopFileSystemFactory {
+ /** */
+ private static final long serialVersionUID = 0L;
+
+ /** Per-user file system cache. */
+ private final transient HadoopLazyConcurrentMap<String, FileSystem> cache = new HadoopLazyConcurrentMap<>(
+ new HadoopLazyConcurrentMap.ValueFactory<String, FileSystem>() {
+ @Override public FileSystem createValue(String key) throws IOException {
+ return CachingHadoopFileSystemFactory.super.getWithMappedName(key);
+ }
+ }
+ );
+
+ /**
+ * Public non-arg constructor.
+ */
+ public CachingHadoopFileSystemFactory() {
+ // noop
+ }
+
+ /** {@inheritDoc} */
+ @Override public FileSystem getWithMappedName(String name) throws IOException {
+ return cache.getOrCreate(name);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void start() throws IgniteException {
+ super.start();
+
+ // Disable caching.
+ cfg.setBoolean(HadoopFileSystemsUtils.disableFsCachePropertyName(fullUri.getScheme()), true);
+ }
+
+ /** {@inheritDoc} */
+ @Override public void stop() throws IgniteException {
+ super.stop();
+
+ try {
+ cache.close();
+ }
+ catch (IgniteCheckedException ice) {
+ throw new IgniteException(ice);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
new file mode 100644
index 0000000..5ad08ab
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.igfs.IgfsMode;
+import org.apache.ignite.lifecycle.LifecycleAware;
+
+import java.io.IOException;
+import java.io.Serializable;
+
/**
 * Factory for Hadoop {@link FileSystem} used by {@link IgniteHadoopIgfsSecondaryFileSystem}.
 * <p>
 * {@link #get(String)} method will be used whenever a call to a target {@code FileSystem} is required.
 * <p>
 * It is implementation dependent whether to rely on built-in Hadoop file system cache, implement own caching facility
 * or not to cache file systems at all.
 * <p>
 * Concrete factory may implement {@link LifecycleAware} interface. In this case start and stop callbacks will be
 * performed by Ignite. You may want to implement some initialization or cleanup there.
 * <p>
 * Note that factory extends {@link Serializable} interface as it might be necessary to transfer factories over the
 * wire to {@link IgniteHadoopFileSystem} if {@link IgfsMode#PROXY} is enabled for some file
 * system paths.
 */
public interface HadoopFileSystemFactory extends Serializable {
    /**
     * Gets file system for the given user name.
     *
     * @param usrName User name. Implementations may map or normalize it before use.
     * @return File system.
     * @throws IOException In case of error.
     */
    public FileSystem get(String usrName) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/ignite/blob/b7489457/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
new file mode 100644
index 0000000..8085826
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopDefaultJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.T2;
+
+/**
+ * Statistic writer implementation that writes info into any Hadoop file system.
+ */
+public class IgniteHadoopFileSystemCounterWriter implements HadoopCounterWriter {
+ /** */
+ public static final String PERFORMANCE_COUNTER_FILE_NAME = "performance";
+
+ /** */
+ public static final String COUNTER_WRITER_DIR_PROPERTY = "ignite.counters.fswriter.directory";
+
+ /** */
+ private static final String USER_MACRO = "${USER}";
+
+ /** */
+ private static final String DEFAULT_COUNTER_WRITER_DIR = "/user/" + USER_MACRO;
+
+ /** {@inheritDoc} */
+ @Override public void write(HadoopJob job, HadoopCounters cntrs)
+ throws IgniteCheckedException {
+
+ Configuration hadoopCfg = HadoopUtils.safeCreateConfiguration();
+
+ final HadoopJobInfo jobInfo = job.info();
+
+ final HadoopJobId jobId = job.id();
+
+ for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)jobInfo).properties().entrySet())
+ hadoopCfg.set(e.getKey(), e.getValue());
+
+ String user = jobInfo.user();
+
+ user = IgfsUtils.fixUserName(user);
+
+ String dir = jobInfo.property(COUNTER_WRITER_DIR_PROPERTY);
+
+ if (dir == null)
+ dir = DEFAULT_COUNTER_WRITER_DIR;
+
+ Path jobStatPath = new Path(new Path(dir.replace(USER_MACRO, user)), jobId.toString());
+
+ HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null);
+
+ try {
+ hadoopCfg.set(MRJobConfig.USER_NAME, user);
+
+ FileSystem fs = ((HadoopV2Job)job).fileSystem(jobStatPath.toUri(), hadoopCfg);
+
+ fs.mkdirs(jobStatPath);
+
+ try (PrintStream out = new PrintStream(fs.create(new Path(jobStatPath, PERFORMANCE_COUNTER_FILE_NAME)))) {
+ for (T2<String, Long> evt : perfCntr.evts()) {
+ out.print(evt.get1());
+ out.print(':');
+ out.println(evt.get2().toString());
+ }
+
+ out.flush();
+ }
+ }
+ catch (IOException e) {
+ throw new IgniteCheckedException(e);
+ }
+ }
+}
\ No newline at end of file