Posted to commits@ignite.apache.org by al...@apache.org on 2020/07/28 11:16:51 UTC

[ignite] branch ignite-2.9 updated: IGNITE-11942 Remove IGFS and Hadoop support - Fixes #8002.

This is an automated email from the ASF dual-hosted git repository.

alexpl pushed a commit to branch ignite-2.9
in repository https://gitbox.apache.org/repos/asf/ignite.git


The following commit(s) were added to refs/heads/ignite-2.9 by this push:
     new e79d99d  IGNITE-11942 Remove IGFS and Hadoop support - Fixes #8002.
e79d99d is described below

commit e79d99df50ebbea7ba1746eb58fe2d8ff505bb72
Author: Anton Kalashnikov <ka...@yandex.ru>
AuthorDate: Tue Jul 28 14:50:06 2020 +0500

    IGNITE-11942 Remove IGFS and Hadoop support - Fixes #8002.
    
    Signed-off-by: Alexey Goncharuk <al...@gmail.com>
    
    (cherry picked from commit 1b9fe33740c31b6947016230ef50ae5bce88bad2)
---
 DEVNOTES.txt                                       |    26 -
 assembly/dependencies-apache-ignite-hadoop.xml     |   166 -
 assembly/dependencies-apache-ignite-lgpl.xml       |     1 -
 assembly/dependencies-apache-ignite.xml            |     1 -
 assembly/libs/README.txt                           |     1 -
 bin/ignite.sh                                      |     9 -
 config/hadoop/default-config.xml                   |   120 -
 examples/config/filesystem/README.txt              |     8 -
 examples/config/filesystem/core-site.xml           |    42 -
 examples/config/filesystem/example-igfs.xml        |   118 -
 .../apache/ignite/examples/igfs/IgfsExample.java   |   284 -
 .../ignite/examples/igfs/IgfsMapReduceExample.java |   269 -
 .../ignite/examples/igfs/IgfsNodeStartup.java      |    42 -
 .../apache/ignite/examples/igfs/package-info.java  |    23 -
 .../memcache/MemcacheRestExampleNodeStartup.java   |     2 +-
 .../ignite/examples/IgfsExamplesSelfTest.java      |    53 -
 .../testsuites/IgniteExamplesSelfTestSuite.java    |     2 -
 .../rest/JettyRestProcessorAbstractSelfTest.java   |    60 -
 .../src/main/java/org/apache/ignite/Ignite.java    |    22 -
 .../java/org/apache/ignite/IgniteFileSystem.java   |   541 -
 .../cache/eviction/igfs/IgfsEvictionFilter.java    |    37 -
 .../igfs/IgfsPerBlockLruEvictionPolicy.java        |   479 -
 .../igfs/IgfsPerBlockLruEvictionPolicyMXBean.java  |    92 -
 .../ignite/cache/eviction/igfs/package-info.java   |    22 -
 .../configuration/FileSystemConfiguration.java     |   837 --
 .../ignite/configuration/HadoopConfiguration.java  |   230 -
 .../ignite/configuration/IgniteConfiguration.java  |    78 -
 .../java/org/apache/ignite/events/EventType.java   |   167 -
 .../java/org/apache/ignite/events/IgfsEvent.java   |   198 -
 .../org/apache/ignite/hadoop/HadoopInputSplit.java |    54 -
 .../java/org/apache/ignite/hadoop/HadoopJob.java   |    74 -
 .../apache/ignite/hadoop/HadoopMapReducePlan.java  |    80 -
 .../ignite/hadoop/HadoopMapReducePlanner.java      |    40 -
 .../org/apache/ignite/hadoop/package-info.java     |    23 -
 .../org/apache/ignite/igfs/IgfsBlockLocation.java  |    64 -
 .../igfs/IgfsConcurrentModificationException.java  |    57 -
 .../ignite/igfs/IgfsCorruptedFileException.java    |    56 -
 .../igfs/IgfsDirectoryNotEmptyException.java       |    56 -
 .../java/org/apache/ignite/igfs/IgfsException.java |    57 -
 .../main/java/org/apache/ignite/igfs/IgfsFile.java |   119 -
 .../ignite/igfs/IgfsGroupDataBlocksKeyMapper.java  |   139 -
 .../org/apache/ignite/igfs/IgfsInputStream.java    |    81 -
 .../igfs/IgfsInvalidHdfsVersionException.java      |    57 -
 .../ignite/igfs/IgfsInvalidPathException.java      |    57 -
 .../ignite/igfs/IgfsIpcEndpointConfiguration.java  |   289 -
 .../apache/ignite/igfs/IgfsIpcEndpointType.java    |    29 -
 .../java/org/apache/ignite/igfs/IgfsMetrics.java   |   160 -
 .../ignite/igfs/IgfsOutOfSpaceException.java       |    58 -
 .../org/apache/ignite/igfs/IgfsOutputStream.java   |    37 -
 .../igfs/IgfsParentNotDirectoryException.java      |    56 -
 .../main/java/org/apache/ignite/igfs/IgfsPath.java |   295 -
 .../igfs/IgfsPathAlreadyExistsException.java       |    56 -
 .../ignite/igfs/IgfsPathIsDirectoryException.java  |    56 -
 .../igfs/IgfsPathIsNotDirectoryException.java      |    56 -
 .../ignite/igfs/IgfsPathNotFoundException.java     |    56 -
 .../org/apache/ignite/igfs/IgfsPathSummary.java    |   169 -
 .../org/apache/ignite/igfs/IgfsUserContext.java    |   118 -
 .../ignite/igfs/mapreduce/IgfsFileRange.java       |    80 -
 .../igfs/mapreduce/IgfsInputStreamJobAdapter.java  |    52 -
 .../org/apache/ignite/igfs/mapreduce/IgfsJob.java  |    69 -
 .../ignite/igfs/mapreduce/IgfsJobAdapter.java      |    28 -
 .../igfs/mapreduce/IgfsRangeInputStream.java       |   197 -
 .../ignite/igfs/mapreduce/IgfsRecordResolver.java  |    57 -
 .../org/apache/ignite/igfs/mapreduce/IgfsTask.java |   180 -
 .../apache/ignite/igfs/mapreduce/IgfsTaskArgs.java |    81 -
 .../igfs/mapreduce/IgfsTaskNoReduceAdapter.java    |    41 -
 .../apache/ignite/igfs/mapreduce/package-info.java |    23 -
 .../records/IgfsByteDelimiterRecordResolver.java   |   354 -
 .../records/IgfsFixedLengthRecordResolver.java     |    90 -
 .../records/IgfsNewLineRecordResolver.java         |    64 -
 .../records/IgfsStringDelimiterRecordResolver.java |    83 -
 .../igfs/mapreduce/records/package-info.java       |    23 -
 .../java/org/apache/ignite/igfs/package-info.java  |    23 -
 .../igfs/secondary/IgfsSecondaryFileSystem.java    |   223 -
 .../IgfsSecondaryFileSystemPositionedReadable.java |    39 -
 .../local/LocalIgfsSecondaryFileSystem.java        |   603 -
 .../ignite/igfs/secondary/local/package-info.java  |    23 -
 .../apache/ignite/igfs/secondary/package-info.java |    23 -
 .../apache/ignite/internal/GridKernalContext.java  |    39 -
 .../ignite/internal/GridKernalContextImpl.java     |    63 +-
 .../java/org/apache/ignite/internal/GridTopic.java |    10 +-
 .../ignite/internal/IgniteComponentType.java       |    12 +-
 .../java/org/apache/ignite/internal/IgniteEx.java  |    17 -
 .../org/apache/ignite/internal/IgniteKernal.java   |   137 +-
 .../ignite/internal/IgniteNodeAttributes.java      |     3 -
 .../org/apache/ignite/internal/IgnitionEx.java     |    44 -
 .../ignite/internal/binary/BinaryContext.java      |    79 -
 .../internal/cluster/ClusterGroupAdapter.java      |     8 -
 .../ignite/internal/cluster/ClusterGroupEx.java    |     9 -
 .../internal/igfs/common/IgfsControlResponse.java  |   668 -
 .../internal/igfs/common/IgfsDataInputStream.java  |    43 -
 .../internal/igfs/common/IgfsDataOutputStream.java |    47 -
 .../internal/igfs/common/IgfsHandshakeRequest.java |    76 -
 .../internal/igfs/common/IgfsIpcCommand.java       |   102 -
 .../ignite/internal/igfs/common/IgfsLogger.java    |   769 --
 .../internal/igfs/common/IgfsMarshaller.java       |   353 -
 .../ignite/internal/igfs/common/IgfsMessage.java   |    41 -
 .../igfs/common/IgfsModeResolverRequest.java       |    35 -
 .../igfs/common/IgfsPathControlRequest.java        |   259 -
 .../internal/igfs/common/IgfsStatusRequest.java    |    35 -
 .../igfs/common/IgfsStreamControlRequest.java      |   101 -
 .../ignite/internal/igfs/common/package-info.java  |    23 -
 .../internal/managers/IgniteMBeansManager.java     |     3 -
 .../managers/communication/GridIoManager.java      |     2 -
 .../communication/GridIoMessageFactory.java        |    30 +-
 .../managers/communication/GridIoPolicy.java       |     3 -
 .../internal/processors/cache/CacheType.java       |     2 +-
 .../processors/cache/GridCacheAdapter.java         |    46 -
 .../processors/cache/GridCacheProcessor.java       |    22 -
 .../processors/cache/GridCacheProxyImpl.java       |    24 -
 .../internal/processors/cache/GridCacheUtils.java  |    63 +-
 .../cache/IgniteCacheOffheapManagerImpl.java       |    55 -
 .../processors/cache/IgniteInternalCache.java      |    14 -
 .../binary/CacheObjectBinaryProcessorImpl.java     |     3 +-
 .../distributed/near/GridNearCacheAdapter.java     |    15 -
 .../wal/reader/StandaloneGridKernalContext.java    |    29 -
 .../cluster/GridClusterStateProcessor.java         |     2 -
 .../ignite/internal/processors/hadoop/Hadoop.java  |    88 -
 .../processors/hadoop/HadoopClassLoader.java       |   511 -
 .../processors/hadoop/HadoopClasspathUtils.java    |   425 -
 .../processors/hadoop/HadoopDefaultJobInfo.java    |   170 -
 .../processors/hadoop/HadoopFileBlock.java         |   165 -
 .../internal/processors/hadoop/HadoopHelper.java   |    67 -
 .../internal/processors/hadoop/HadoopJobEx.java    |   140 -
 .../internal/processors/hadoop/HadoopJobId.java    |   182 -
 .../internal/processors/hadoop/HadoopJobInfo.java  |    95 -
 .../internal/processors/hadoop/HadoopJobPhase.java |    38 -
 .../processors/hadoop/HadoopJobProperty.java       |   181 -
 .../processors/hadoop/HadoopJobStatus.java         |   211 -
 .../processors/hadoop/HadoopLocations.java         |   123 -
 .../hadoop/HadoopMapperAwareTaskOutput.java        |    32 -
 .../processors/hadoop/HadoopNoopHelper.java        |    74 -
 .../processors/hadoop/HadoopNoopProcessor.java     |    90 -
 .../processors/hadoop/HadoopPartitioner.java       |    33 -
 .../processors/hadoop/HadoopProcessorAdapter.java  |   104 -
 .../processors/hadoop/HadoopSerialization.java     |    54 -
 .../internal/processors/hadoop/HadoopTask.java     |    71 -
 .../processors/hadoop/HadoopTaskContext.java       |   220 -
 .../internal/processors/hadoop/HadoopTaskInfo.java |   199 -
 .../processors/hadoop/HadoopTaskInput.java         |    54 -
 .../processors/hadoop/HadoopTaskOutput.java        |    40 -
 .../internal/processors/hadoop/HadoopTaskType.java |    56 -
 .../processors/hadoop/counter/HadoopCounter.java   |    44 -
 .../hadoop/counter/HadoopCounterWriter.java        |    36 -
 .../processors/hadoop/counter/HadoopCounters.java  |    49 -
 .../hadoop/io/PartiallyOffheapRawComparatorEx.java |    33 -
 .../processors/hadoop/message/HadoopMessage.java   |    27 -
 .../internal/processors/hadoop/package-info.java   |    23 -
 .../hadoop/shuffle/HadoopDirectShuffleMessage.java |   272 -
 .../hadoop/shuffle/HadoopShuffleAck.java           |   169 -
 .../hadoop/shuffle/HadoopShuffleFinishRequest.java |   171 -
 .../shuffle/HadoopShuffleFinishResponse.java       |   141 -
 .../hadoop/shuffle/HadoopShuffleMessage.java       |   363 -
 .../processors/igfs/IgfsAbstractOutputStream.java  |   264 -
 .../internal/processors/igfs/IgfsAckMessage.java   |   198 -
 .../internal/processors/igfs/IgfsAsyncImpl.java    |   354 -
 .../internal/processors/igfs/IgfsAttributes.java   |   190 -
 .../internal/processors/igfs/IgfsBaseBlockKey.java |    42 -
 .../internal/processors/igfs/IgfsBlockKey.java     |   300 -
 .../processors/igfs/IgfsBlockLocationImpl.java     |   375 -
 .../processors/igfs/IgfsBlocksMessage.java         |   179 -
 .../processors/igfs/IgfsClientSession.java         |    74 -
 .../IgfsColocatedMetadataAffinityKeyMapper.java    |    47 -
 .../processors/igfs/IgfsCommunicationMessage.java  |    80 -
 .../internal/processors/igfs/IgfsContext.java      |   244 -
 .../internal/processors/igfs/IgfsCreateResult.java |    65 -
 .../internal/processors/igfs/IgfsDataManager.java  |  1747 ---
 .../processors/igfs/IgfsDeleteMessage.java         |   193 -
 .../internal/processors/igfs/IgfsDeleteResult.java |    62 -
 .../internal/processors/igfs/IgfsDeleteWorker.java |   345 -
 .../processors/igfs/IgfsDirectoryInfo.java         |   282 -
 .../internal/processors/igfs/IgfsEntryInfo.java    |   318 -
 .../ignite/internal/processors/igfs/IgfsEx.java    |   127 -
 .../processors/igfs/IgfsFileAffinityRange.java     |   418 -
 .../internal/processors/igfs/IgfsFileImpl.java     |   292 -
 .../internal/processors/igfs/IgfsFileInfo.java     |   289 -
 .../internal/processors/igfs/IgfsFileMap.java      |   399 -
 .../processors/igfs/IgfsFileWorkerBatch.java       |   215 -
 .../IgfsFileWorkerBatchCancelledException.java     |    51 -
 .../processors/igfs/IgfsFragmentizerManager.java   |   802 --
 .../processors/igfs/IgfsFragmentizerRequest.java   |   160 -
 .../processors/igfs/IgfsFragmentizerResponse.java  |   121 -
 .../processors/igfs/IgfsHandshakeResponse.java     |   104 -
 .../internal/processors/igfs/IgfsHelper.java       |    49 -
 .../internal/processors/igfs/IgfsHelperImpl.java   |    61 -
 .../ignite/internal/processors/igfs/IgfsImpl.java  |  1852 ---
 .../processors/igfs/IgfsInputStreamDescriptor.java |    81 -
 .../processors/igfs/IgfsInputStreamImpl.java       |   599 -
 .../processors/igfs/IgfsInvalidRangeException.java |    43 -
 .../internal/processors/igfs/IgfsIpcHandler.java   |   639 -
 .../internal/processors/igfs/IgfsJobImpl.java      |   123 -
 ...sLazySecondaryFileSystemPositionedReadable.java |    83 -
 .../internal/processors/igfs/IgfsListingEntry.java |   140 -
 .../internal/processors/igfs/IgfsLocalMetrics.java |   212 -
 .../internal/processors/igfs/IgfsManager.java      |   155 -
 .../internal/processors/igfs/IgfsMetaManager.java  |  3403 -----
 .../processors/igfs/IgfsMetricsAdapter.java        |   241 -
 .../internal/processors/igfs/IgfsModeResolver.java |   190 -
 .../processors/igfs/IgfsNodePredicate.java         |    80 -
 .../internal/processors/igfs/IgfsNoopHelper.java   |    41 -
 .../processors/igfs/IgfsNoopProcessor.java         |    83 -
 .../processors/igfs/IgfsOutputStreamImpl.java      |   368 -
 .../processors/igfs/IgfsOutputStreamProxyImpl.java |   162 -
 .../internal/processors/igfs/IgfsPathIds.java      |   323 -
 .../processors/igfs/IgfsPathsCreateResult.java     |    63 -
 .../internal/processors/igfs/IgfsProcessor.java    |   374 -
 .../processors/igfs/IgfsProcessorAdapter.java      |    81 -
 .../internal/processors/igfs/IgfsSamplingKey.java  |    86 -
 .../igfs/IgfsSecondaryFileSystemCreateContext.java |   113 -
 .../igfs/IgfsSecondaryFileSystemImpl.java          |   130 -
 .../igfs/IgfsSecondaryInputStreamDescriptor.java   |    59 -
 .../internal/processors/igfs/IgfsServer.java       |   481 -
 .../processors/igfs/IgfsServerHandler.java         |    56 -
 .../processors/igfs/IgfsServerManager.java         |   214 -
 .../internal/processors/igfs/IgfsStatus.java       |    79 -
 .../internal/processors/igfs/IgfsSyncMessage.java  |   152 -
 .../internal/processors/igfs/IgfsTaskArgsImpl.java |   139 -
 .../internal/processors/igfs/IgfsThread.java       |    88 -
 .../processors/igfs/IgfsThreadFactory.java         |    61 -
 .../ignite/internal/processors/igfs/IgfsUtils.java |  1226 --
 .../igfs/client/IgfsClientAbstractCallable.java    |   141 -
 .../igfs/client/IgfsClientAffinityCallable.java    |    96 -
 .../igfs/client/IgfsClientDeleteCallable.java      |    79 -
 .../igfs/client/IgfsClientExistsCallable.java      |    59 -
 .../igfs/client/IgfsClientInfoCallable.java        |    60 -
 .../igfs/client/IgfsClientListFilesCallable.java   |    61 -
 .../igfs/client/IgfsClientListPathsCallable.java   |    60 -
 .../igfs/client/IgfsClientMkdirsCallable.java      |    83 -
 .../igfs/client/IgfsClientRenameCallable.java      |    82 -
 .../igfs/client/IgfsClientSetTimesCallable.java    |    89 -
 .../igfs/client/IgfsClientSizeCallable.java        |    59 -
 .../igfs/client/IgfsClientSummaryCallable.java     |    60 -
 .../igfs/client/IgfsClientUpdateCallable.java      |    82 -
 .../meta/IgfsClientMetaIdsForPathCallable.java     |    65 -
 .../meta/IgfsClientMetaInfoForPathCallable.java    |    64 -
 .../client/meta/IgfsClientMetaUnlockCallable.java  |   126 -
 .../processors/igfs/data/IgfsDataPutProcessor.java |    98 -
 .../meta/IgfsMetaDirectoryCreateProcessor.java     |   181 -
 .../meta/IgfsMetaDirectoryListingAddProcessor.java |   136 -
 .../IgfsMetaDirectoryListingRemoveProcessor.java   |   131 -
 .../IgfsMetaDirectoryListingRenameProcessor.java   |   132 -
 .../IgfsMetaDirectoryListingReplaceProcessor.java  |   129 -
 .../igfs/meta/IgfsMetaFileCreateProcessor.java     |   193 -
 .../igfs/meta/IgfsMetaFileLockProcessor.java       |   106 -
 .../meta/IgfsMetaFileRangeDeleteProcessor.java     |   110 -
 .../meta/IgfsMetaFileRangeUpdateProcessor.java     |   119 -
 .../meta/IgfsMetaFileReserveSpaceProcessor.java    |   119 -
 .../igfs/meta/IgfsMetaFileUnlockProcessor.java     |   165 -
 .../meta/IgfsMetaUpdatePropertiesProcessor.java    |   120 -
 .../igfs/meta/IgfsMetaUpdateTimesProcessor.java    |   112 -
 .../internal/processors/igfs/package-info.java     |    23 -
 .../secondary/local/LocalFileSystemBlockKey.java   |   103 -
 .../secondary/local/LocalFileSystemIgfsFile.java   |   137 -
 .../local/LocalFileSystemPositionedReadable.java   |    64 -
 .../local/LocalFileSystemSizeVisitor.java          |    60 -
 .../igfs/secondary/local/LocalFileSystemUtils.java |   166 -
 .../internal/processors/job/GridJobWorker.java     |     3 +-
 .../processors/metric/GridMetricManager.java       |     3 -
 .../internal/processors/pool/PoolProcessor.java    |     5 -
 .../processors/resource/GridResourceIoc.java       |     6 +-
 .../processors/resource/GridResourceProcessor.java |    22 -
 .../internal/processors/task/GridTaskWorker.java   |     4 +-
 .../apache/ignite/internal/util/IgniteUtils.java   |   110 +-
 .../ignite/internal/util/ipc/IpcEndpoint.java      |     2 +-
 .../internal/util/ipc/IpcEndpointFactory.java      |     2 +-
 .../visor/cache/VisorCacheSqlMetadata.java         |    10 +-
 .../ignite/internal/visor/igfs/VisorIgfs.java      |    27 +-
 .../internal/visor/igfs/VisorIgfsEndpoint.java     |     1 +
 .../internal/visor/igfs/VisorIgfsFormatTask.java   |    10 +-
 .../visor/igfs/VisorIgfsFormatTaskArg.java         |     1 +
 .../internal/visor/igfs/VisorIgfsMetrics.java      |    32 +-
 .../visor/igfs/VisorIgfsMode.java}                 |    18 +-
 .../internal/visor/igfs/VisorIgfsProfiler.java     |     4 +-
 .../visor/igfs/VisorIgfsProfilerClearTask.java     |    57 +-
 .../visor/igfs/VisorIgfsProfilerClearTaskArg.java  |     1 +
 .../igfs/VisorIgfsProfilerClearTaskResult.java     |     1 +
 .../visor/igfs/VisorIgfsProfilerEntry.java         |    10 +-
 .../internal/visor/igfs/VisorIgfsProfilerTask.java |   495 +-
 .../visor/igfs/VisorIgfsProfilerTaskArg.java       |     1 +
 .../igfs/VisorIgfsProfilerUniformityCounters.java  |     1 +
 .../visor/igfs/VisorIgfsResetMetricsTask.java      |    11 +-
 .../visor/igfs/VisorIgfsResetMetricsTaskArg.java   |     1 +
 .../visor/igfs/VisorIgfsSamplingStateTask.java     |    16 +-
 .../visor/igfs/VisorIgfsSamplingStateTaskArg.java  |     1 +
 .../node/VisorExecutorServiceConfiguration.java    |     1 -
 .../visor/node/VisorGridConfiguration.java         |    11 +-
 .../visor/node/VisorHadoopConfiguration.java       |    18 -
 .../visor/node/VisorIgfsConfiguration.java         |    69 +-
 .../visor/node/VisorNodeDataCollectorJob.java      |    71 +-
 .../VisorQueryDetailMetricsCollectorTask.java      |     6 +-
 .../internal/visor/query/VisorQueryEntity.java     |     6 +-
 .../ignite/internal/visor/util/VisorTaskUtils.java |    27 -
 .../ignite/resources/FileSystemResource.java       |    62 -
 .../main/resources/META-INF/classnames.properties  |   125 -
 .../config/hadoop/core-site-loopback-secondary.xml |    54 -
 .../src/test/config/hadoop/core-site-loopback.xml  |    44 -
 .../src/test/config/hadoop/core-site-secondary.xml |    44 -
 modules/core/src/test/config/hadoop/core-site.xml  |    39 -
 modules/core/src/test/config/igfs-loopback.xml     |   112 -
 modules/core/src/test/config/igfs-shmem.xml        |   112 -
 .../ignite/igfs/IgfsEventsAbstractSelfTest.java    |   849 --
 .../igfs/IgfsFragmentizerAbstractSelfTest.java     |   144 -
 .../ignite/igfs/IgfsFragmentizerSelfTest.java      |   277 -
 .../igfs/IgfsFragmentizerTopologySelfTest.java     |    51 -
 .../org/apache/ignite/igfs/IgfsPathSelfTest.java   |   163 -
 .../apache/ignite/igfs/IgfsTestInputGenerator.java |    54 -
 .../ignite/internal/GridAffinitySelfTest.java      |     2 +-
 .../processors/cache/GridCacheLeakTest.java        |     2 +-
 .../dht/GridCacheDhtPreloadPerformanceTest.java    |     1 -
 .../DefaultIgfsSecondaryFileSystemTestAdapter.java |   117 -
 .../processors/igfs/IgfsAbstractBaseSelfTest.java  |  1052 --
 .../processors/igfs/IgfsAbstractSelfTest.java      |  2606 ----
 .../igfs/IgfsAtomicPrimaryMultiNodeSelfTest.java   |    39 -
 .../processors/igfs/IgfsAtomicPrimarySelfTest.java |    39 -
 .../processors/igfs/IgfsAttributesSelfTest.java    |    84 -
 .../igfs/IgfsBackupFailoverSelfTest.java           |   606 -
 .../igfs/IgfsBackupsDualAsyncSelfTest.java         |    40 -
 .../igfs/IgfsBackupsDualSyncSelfTest.java          |    40 -
 .../igfs/IgfsBackupsPrimarySelfTest.java           |    40 -
 ...fsBlockMessageSystemPoolStarvationSelfTest.java |   298 -
 ...IgfsCachePerBlockLruEvictionPolicySelfTest.java |   503 -
 .../processors/igfs/IgfsCacheSelfTest.java         |   138 -
 .../processors/igfs/IgfsCommonAbstractTest.java    |    57 -
 .../processors/igfs/IgfsDataManagerSelfTest.java   |   613 -
 .../processors/igfs/IgfsDualAbstractSelfTest.java  |  1688 ---
 .../igfs/IgfsDualAsyncClientSelfTest.java          |    28 -
 .../processors/igfs/IgfsDualAsyncSelfTest.java     |    32 -
 .../igfs/IgfsDualSyncClientSelfTest.java           |    28 -
 .../processors/igfs/IgfsDualSyncSelfTest.java      |    32 -
 .../processors/igfs/IgfsFileInfoSelfTest.java      |    83 -
 .../processors/igfs/IgfsFileMapSelfTest.java       |   347 -
 .../IgfsGroupDataBlockKeyMapperHashSelfTest.java   |   136 -
 .../internal/processors/igfs/IgfsIgniteMock.java   |   655 -
 ...calSecondaryFileSystemDualAbstractSelfTest.java |   297 -
 ...SecondaryFileSystemDualAsyncClientSelfTest.java |    28 -
 ...sLocalSecondaryFileSystemDualAsyncSelfTest.java |    32 -
 ...lSecondaryFileSystemDualSyncClientSelfTest.java |    28 -
 ...fsLocalSecondaryFileSystemDualSyncSelfTest.java |    32 -
 ...ocalSecondaryFileSystemProxyClientSelfTest.java |    28 -
 .../IgfsLocalSecondaryFileSystemProxySelfTest.java |   354 -
 .../IgfsLocalSecondaryFileSystemTestAdapter.java   |   168 -
 .../processors/igfs/IgfsMetaManagerSelfTest.java   |   404 -
 .../processors/igfs/IgfsMetricsSelfTest.java       |   527 -
 .../ignite/internal/processors/igfs/IgfsMock.java  |   438 -
 .../processors/igfs/IgfsModeResolverSelfTest.java  |   192 -
 .../processors/igfs/IgfsModesSelfTest.java         |   497 -
 .../processors/igfs/IgfsOneClientNodeTest.java     |   127 -
 .../processors/igfs/IgfsPrimaryClientSelfTest.java |    30 -
 .../igfs/IgfsPrimaryMultiNodeSelfTest.java         |    40 -
 ...gfsPrimaryRelaxedConsistencyClientSelfTest.java |    28 -
 ...PrimaryRelaxedConsistencyMultiNodeSelfTest.java |    37 -
 .../IgfsPrimaryRelaxedConsistencySelfTest.java     |    28 -
 .../processors/igfs/IgfsPrimarySelfTest.java       |    32 -
 .../processors/igfs/IgfsProcessorSelfTest.java     |   994 --
 .../igfs/IgfsProcessorValidationSelfTest.java      |   409 -
 .../processors/igfs/IgfsProxySelfTest.java         |    32 -
 .../IgfsSecondaryFileSystemInjectionSelfTest.java  |   267 -
 .../igfs/IgfsSecondaryFileSystemTestAdapter.java   |   117 -
 ...gerIpcEndpointRegistrationAbstractSelfTest.java |   253 -
 ...cEndpointRegistrationOnLinuxAndMacSelfTest.java |    56 -
 ...erIpcEndpointRegistrationOnWindowsSelfTest.java |    62 -
 .../internal/processors/igfs/IgfsSizeSelfTest.java |   710 -
 .../processors/igfs/IgfsStartCacheTest.java        |   157 -
 .../processors/igfs/IgfsStreamsSelfTest.java       |   456 -
 .../internal/processors/igfs/IgfsTaskSelfTest.java |   281 -
 .../processors/igfs/IgfsTestInputStream.java       |    67 -
 .../processors/igfs/benchmark/IgfsBenchmark.java   |   561 -
 .../internal/processors/igfs/package-info.java     |    23 -
 .../split/IgfsAbstractRecordResolverSelfTest.java  |   172 -
 .../IgfsByteDelimiterRecordResolverSelfTest.java   |   344 -
 .../IgfsFixedLengthRecordResolverSelfTest.java     |   149 -
 ...IgfsNewLineDelimiterRecordResolverSelfTest.java |   132 -
 .../IgfsStringDelimiterRecordResolverSelfTest.java |   139 -
 .../ignite/internal/util/IgniteUtilsSelfTest.java  |    69 +-
 .../IpcSharedMemoryCrashDetectionSelfTest.java     |   255 +-
 .../util/ipc/shmem/IpcSharedMemoryNodeStartup.java |    19 -
 ...TestClient.java => SharedMemoryTestClient.java} |     2 +-
 ...TestServer.java => SharedMemoryTestServer.java} |     2 +-
 .../junits/GridTestKernalContext.java              |     1 -
 .../ignite/testframework/junits/IgniteMock.java    |    11 -
 .../junits/multijvm/IgniteClusterProcessProxy.java |     5 -
 .../junits/multijvm/IgniteProcessProxy.java        |    22 -
 .../ignite/testsuites/IgniteIgfsTestSuite.java     |   152 -
 .../ignite/thread/IgniteThreadPoolSizeTest.java    |     8 -
 .../ignite/thread/ThreadPoolMetricsTest.java       |     1 -
 modules/hadoop/README.txt                          |    33 -
 modules/hadoop/config/core-site.ignite.xml         |    90 -
 modules/hadoop/config/hive-site.ignite.xml         |    37 -
 modules/hadoop/config/mapred-site.ignite.xml       |    66 -
 modules/hadoop/docs/HADOOP_README.txt              |   122 -
 modules/hadoop/pom.xml                             |   246 -
 .../hadoop/fs/BasicHadoopFileSystemFactory.java    |   163 -
 .../hadoop/fs/CachingHadoopFileSystemFactory.java  |    41 -
 .../ignite/hadoop/fs/HadoopFileSystemFactory.java  |    44 -
 .../fs/IgniteHadoopFileSystemCounterWriter.java    |    72 -
 .../fs/IgniteHadoopIgfsSecondaryFileSystem.java    |   287 -
 .../hadoop/fs/KerberosHadoopFileSystemFactory.java |   141 -
 .../org/apache/ignite/hadoop/fs/package-info.java  |    23 -
 .../hadoop/fs/v1/IgniteHadoopFileSystem.java       |   922 --
 .../apache/ignite/hadoop/fs/v1/package-info.java   |    23 -
 .../hadoop/fs/v2/IgniteHadoopFileSystem.java       |   802 --
 .../apache/ignite/hadoop/fs/v2/package-info.java   |    23 -
 .../io/BytesWritablePartiallyRawComparator.java    |    51 -
 .../ignite/hadoop/io/PartiallyRawComparator.java   |    33 -
 .../org/apache/ignite/hadoop/io/RawMemory.java     |    86 -
 .../hadoop/io/TextPartiallyRawComparator.java      |    51 -
 .../org/apache/ignite/hadoop/io/package-info.java  |    23 -
 .../IgniteHadoopClientProtocolProvider.java        |   132 -
 .../IgniteHadoopWeightedMapReducePlanner.java      |   858 --
 .../ignite/hadoop/mapreduce/package-info.java      |    23 -
 .../planner/HadoopAbstractMapReducePlanner.java    |   115 -
 .../planner/HadoopTestRoundRobinMrPlanner.java     |    75 -
 .../apache/ignite/hadoop/planner/package-info.java |    23 -
 .../ignite/hadoop/util/BasicUserNameMapper.java    |   111 -
 .../ignite/hadoop/util/ChainedUserNameMapper.java  |    93 -
 .../ignite/hadoop/util/KerberosUserNameMapper.java |   137 -
 .../apache/ignite/hadoop/util/UserNameMapper.java  |    34 -
 .../apache/ignite/hadoop/util/package-info.java    |    23 -
 .../processors/hadoop/HadoopAttributes.java        |   166 -
 .../processors/hadoop/HadoopCommonUtils.java       |   154 -
 .../processors/hadoop/HadoopComponent.java         |    62 -
 .../internal/processors/hadoop/HadoopContext.java  |   202 -
 .../processors/hadoop/HadoopExternalSplit.java     |    96 -
 .../processors/hadoop/HadoopHelperImpl.java        |   138 -
 .../internal/processors/hadoop/HadoopImpl.java     |   134 -
 .../processors/hadoop/HadoopMapperUtils.java       |    56 -
 .../processors/hadoop/HadoopProcessor.java         |   252 -
 .../internal/processors/hadoop/HadoopSetup.java    |   541 -
 .../processors/hadoop/HadoopSplitWrapper.java      |   128 -
 .../hadoop/HadoopTaskCancelledException.java       |    35 -
 .../hadoop/counter/HadoopCounterAdapter.java       |   130 -
 .../hadoop/counter/HadoopCountersImpl.java         |   200 -
 .../hadoop/counter/HadoopLongCounter.java          |    93 -
 .../hadoop/counter/HadoopPerformanceCounter.java   |   286 -
 .../hadoop/delegate/HadoopDelegateUtils.java       |   136 -
 .../HadoopFileSystemCounterWriterDelegate.java     |    36 -
 .../delegate/HadoopFileSystemFactoryDelegate.java  |    35 -
 .../HadoopIgfsSecondaryFileSystemDelegate.java     |    28 -
 .../processors/hadoop/igfs/HadoopIgfsEndpoint.java |   208 -
 .../hadoop/impl/HadoopMapReduceCounterGroup.java   |   123 -
 .../hadoop/impl/HadoopMapReduceCounters.java       |   228 -
 .../processors/hadoop/impl/HadoopUtils.java        |   437 -
 .../HadoopBasicFileSystemFactoryDelegate.java      |   177 -
 .../HadoopCachingFileSystemFactoryDelegate.java    |    74 -
 .../HadoopDefaultFileSystemFactoryDelegate.java    |    61 -
 .../HadoopFileSystemCounterWriterDelegateImpl.java |   107 -
 .../HadoopIgfsSecondaryFileSystemDelegateImpl.java |   527 -
 .../HadoopKerberosFileSystemFactoryDelegate.java   |   120 -
 .../hadoop/impl/fs/HadoopFileSystemCacheUtils.java |   248 -
 .../hadoop/impl/fs/HadoopFileSystemsUtils.java     |    62 -
 .../hadoop/impl/fs/HadoopLazyConcurrentMap.java    |   209 -
 .../hadoop/impl/fs/HadoopLocalFileSystemV1.java    |    39 -
 .../hadoop/impl/fs/HadoopLocalFileSystemV2.java    |    88 -
 .../hadoop/impl/fs/HadoopParameters.java           |    94 -
 .../hadoop/impl/fs/HadoopRawLocalFileSystem.java   |   314 -
 .../processors/hadoop/impl/igfs/HadoopIgfs.java    |   202 -
 .../igfs/HadoopIgfsCommunicationException.java     |    57 -
 .../processors/hadoop/impl/igfs/HadoopIgfsEx.java  |   101 -
 .../hadoop/impl/igfs/HadoopIgfsFuture.java         |    94 -
 .../hadoop/impl/igfs/HadoopIgfsInProc.java         |   678 -
 .../hadoop/impl/igfs/HadoopIgfsInputStream.java    |   629 -
 .../processors/hadoop/impl/igfs/HadoopIgfsIo.java  |    76 -
 .../hadoop/impl/igfs/HadoopIgfsIpcIo.java          |   624 -
 .../hadoop/impl/igfs/HadoopIgfsIpcIoListener.java  |    36 -
 .../hadoop/impl/igfs/HadoopIgfsJclLogger.java      |   123 -
 .../hadoop/impl/igfs/HadoopIgfsOutProc.java        |   531 -
 .../hadoop/impl/igfs/HadoopIgfsOutputStream.java   |   201 -
 .../hadoop/impl/igfs/HadoopIgfsProperties.java     |    89 -
 .../impl/igfs/HadoopIgfsProxyInputStream.java      |   337 -
 .../impl/igfs/HadoopIgfsProxyOutputStream.java     |   165 -
 ...pIgfsSecondaryFileSystemPositionedReadable.java |   112 -
 .../hadoop/impl/igfs/HadoopIgfsStreamDelegate.java |    96 -
 .../impl/igfs/HadoopIgfsStreamEventListener.java   |    39 -
 .../hadoop/impl/igfs/HadoopIgfsUtils.java          |   179 -
 .../hadoop/impl/igfs/HadoopIgfsWrapper.java        |   560 -
 .../hadoop/impl/proto/HadoopClientProtocol.java    |   384 -
 .../hadoop/impl/v1/HadoopV1CleanupTask.java        |    64 -
 .../processors/hadoop/impl/v1/HadoopV1Counter.java |   106 -
 .../processors/hadoop/impl/v1/HadoopV1MapTask.java |   135 -
 .../hadoop/impl/v1/HadoopV1OutputCollector.java    |   137 -
 .../hadoop/impl/v1/HadoopV1Partitioner.java        |    44 -
 .../hadoop/impl/v1/HadoopV1ReduceTask.java         |   116 -
 .../hadoop/impl/v1/HadoopV1Reporter.java           |    85 -
 .../hadoop/impl/v1/HadoopV1SetupTask.java          |    56 -
 .../hadoop/impl/v1/HadoopV1Splitter.java           |   102 -
 .../processors/hadoop/impl/v1/HadoopV1Task.java    |    97 -
 .../processors/hadoop/impl/v2/HadoopDaemon.java    |   125 -
 .../hadoop/impl/v2/HadoopSerializationWrapper.java |   138 -
 .../hadoop/impl/v2/HadoopShutdownHookManager.java  |    98 -
 .../hadoop/impl/v2/HadoopV2CleanupTask.java        |    71 -
 .../processors/hadoop/impl/v2/HadoopV2Context.java |   245 -
 .../processors/hadoop/impl/v2/HadoopV2Counter.java |    88 -
 ...pV2DelegatingPartiallyOffheapRawComparator.java |    54 -
 .../processors/hadoop/impl/v2/HadoopV2Job.java     |   511 -
 .../hadoop/impl/v2/HadoopV2JobResourceManager.java |   321 -
 .../processors/hadoop/impl/v2/HadoopV2MapTask.java |   111 -
 .../hadoop/impl/v2/HadoopV2Partitioner.java        |    44 -
 .../hadoop/impl/v2/HadoopV2ReduceTask.java         |   105 -
 .../hadoop/impl/v2/HadoopV2SetupTask.java          |    64 -
 .../hadoop/impl/v2/HadoopV2Splitter.java           |   111 -
 .../processors/hadoop/impl/v2/HadoopV2Task.java    |   185 -
 .../hadoop/impl/v2/HadoopV2TaskContext.java        |   628 -
 .../impl/v2/HadoopWritableSerialization.java       |    75 -
 .../processors/hadoop/io/OffheapRawMemory.java     |   131 -
 .../hadoop/jobtracker/HadoopJobMetadata.java       |   316 -
 .../hadoop/jobtracker/HadoopJobTracker.java        |  1780 ---
 .../hadoop/mapreduce/MapReduceClient.java          |   160 -
 .../hadoop/planner/HadoopDefaultMapReducePlan.java |   109 -
 .../hadoop/planner/HadoopMapReducePlanGroup.java   |   149 -
 .../planner/HadoopMapReducePlanTopology.java       |    88 -
 .../proto/HadoopProtocolJobCountersTask.java       |    46 -
 .../hadoop/proto/HadoopProtocolJobStatusTask.java  |    82 -
 .../hadoop/proto/HadoopProtocolKillJobTask.java    |    46 -
 .../hadoop/proto/HadoopProtocolNextTaskIdTask.java |    36 -
 .../hadoop/proto/HadoopProtocolSubmitJobTask.java  |    59 -
 .../hadoop/proto/HadoopProtocolTaskAdapter.java    |   119 -
 .../hadoop/proto/HadoopProtocolTaskArguments.java  |    84 -
 .../processors/hadoop/shuffle/HadoopShuffle.java   |   301 -
 .../hadoop/shuffle/HadoopShuffleJob.java           |  1113 --
 .../hadoop/shuffle/HadoopShuffleLocalState.java    |    67 -
 .../hadoop/shuffle/HadoopShuffleRemoteState.java   |    60 -
 .../collections/HadoopConcurrentHashMultimap.java  |   616 -
 .../shuffle/collections/HadoopHashMultimap.java    |   176 -
 .../collections/HadoopHashMultimapBase.java        |   212 -
 .../hadoop/shuffle/collections/HadoopMultimap.java |   113 -
 .../shuffle/collections/HadoopMultimapBase.java    |   438 -
 .../hadoop/shuffle/collections/HadoopSkipList.java |   744 --
 .../shuffle/direct/HadoopDirectDataInput.java      |   232 -
 .../shuffle/direct/HadoopDirectDataOutput.java     |   234 -
 .../direct/HadoopDirectDataOutputContext.java      |   139 -
 .../direct/HadoopDirectDataOutputState.java        |    66 -
 .../hadoop/shuffle/streams/HadoopDataInStream.java |   203 -
 .../shuffle/streams/HadoopDataOutStream.java       |   130 -
 .../shuffle/streams/HadoopOffheapBuffer.java       |   140 -
 .../taskexecutor/HadoopEmbeddedTaskExecutor.java   |   152 -
 .../hadoop/taskexecutor/HadoopExecutorService.java |   233 -
 .../hadoop/taskexecutor/HadoopRunnableTask.java    |   300 -
 .../taskexecutor/HadoopTaskExecutorAdapter.java    |    59 -
 .../hadoop/taskexecutor/HadoopTaskState.java       |    38 -
 .../hadoop/taskexecutor/HadoopTaskStatus.java      |   116 -
 .../external/HadoopExternalTaskExecutor.java       |   977 --
 .../external/HadoopExternalTaskMetadata.java       |    67 -
 .../external/HadoopJobInfoUpdateRequest.java       |   113 -
 .../external/HadoopPrepareForJobRequest.java       |   130 -
 .../external/HadoopProcessDescriptor.java          |   149 -
 .../external/HadoopProcessStartedAck.java          |    47 -
 .../external/HadoopTaskExecutionRequest.java       |   114 -
 .../external/HadoopTaskFinishedMessage.java        |    94 -
 .../external/child/HadoopChildProcessRunner.java   |   464 -
 .../child/HadoopExternalProcessStarter.java        |   301 -
 .../HadoopAbstractCommunicationClient.java         |    96 -
 .../communication/HadoopCommunicationClient.java   |    72 -
 .../communication/HadoopExternalCommunication.java |  1474 ---
 .../HadoopHandshakeTimeoutException.java           |    42 -
 .../communication/HadoopIpcToNioAdapter.java       |   253 -
 .../communication/HadoopMarshallerFilter.java      |    93 -
 .../communication/HadoopMessageListener.java       |    39 -
 .../HadoopTcpNioCommunicationClient.java           |    93 -
 .../main/resources/META-INF/classnames.properties  |   101 -
 ...adoop.mapreduce.protocol.ClientProtocolProvider |     1 -
 .../src/test/config/hadoop-fs-open-test/grid-0.xml |   125 -
 .../src/test/config/hadoop-fs-open-test/grid-1.xml |   125 -
 .../src/test/config/hadoop-fs-open-test/grid-2.xml |   125 -
 .../src/test/config/igfs-cli-config-dual-async.xml |   134 -
 .../src/test/config/igfs-cli-config-dual-sync.xml  |   132 -
 .../src/test/config/igfs-cli-config-primary.xml    |   123 -
 .../src/test/config/igfs-cli-config-proxy.xml      |   132 -
 .../processors/hadoop/HadoopSharedMap.java         |    67 -
 .../processors/hadoop/HadoopTestClassLoader.java   |    99 -
 .../hadoop/impl/HadoopAbstractMapReduceTest.java   |   449 -
 .../hadoop/impl/HadoopAbstractSelfTest.java        |   247 -
 .../hadoop/impl/HadoopAbstractWordCountTest.java   |   175 -
 .../hadoop/impl/HadoopCommandLineTest.java         |   500 -
 .../hadoop/impl/HadoopErrorSimulator.java          |   326 -
 .../hadoop/impl/HadoopFileSystemsTest.java         |   165 -
 .../processors/hadoop/impl/HadoopGroupingTest.java |   304 -
 .../hadoop/impl/HadoopJobTrackerSelfTest.java      |   329 -
 .../impl/HadoopMapReduceEmbeddedSelfTest.java      |   274 -
 .../impl/HadoopMapReduceErrorResilienceTest.java   |   163 -
 .../hadoop/impl/HadoopMapReduceTest.java           |    68 -
 .../hadoop/impl/HadoopNoHadoopMapReduceTest.java   |    49 -
 .../hadoop/impl/HadoopPlannerMockJob.java          |   187 -
 .../impl/HadoopSerializationWrapperSelfTest.java   |    83 -
 .../hadoop/impl/HadoopSnappyFullMapReduceTest.java |    46 -
 .../processors/hadoop/impl/HadoopSnappyTest.java   |   109 -
 .../hadoop/impl/HadoopSortingExternalTest.java     |    46 -
 .../processors/hadoop/impl/HadoopSortingTest.java  |   306 -
 .../hadoop/impl/HadoopSplitWrapperSelfTest.java    |    72 -
 .../processors/hadoop/impl/HadoopStartup.java      |    53 -
 .../hadoop/impl/HadoopTaskExecutionSelfTest.java   |   550 -
 .../processors/hadoop/impl/HadoopTasksV1Test.java  |    62 -
 .../processors/hadoop/impl/HadoopTasksV2Test.java  |    81 -
 .../impl/HadoopTasksVersionsAbstractTest.java      |   269 -
 .../processors/hadoop/impl/HadoopTeraSortTest.java |   415 -
 .../hadoop/impl/HadoopTestTaskContext.java         |   233 -
 .../processors/hadoop/impl/HadoopTestUtils.java    |   177 -
 .../hadoop/impl/HadoopTxConfigCacheTest.java       |    44 -
 .../hadoop/impl/HadoopUserLibsSelfTest.java        |   268 -
 .../hadoop/impl/HadoopV2JobSelfTest.java           |   110 -
 .../hadoop/impl/HadoopValidationSelfTest.java      |    55 -
 .../impl/HadoopWeightedMapReducePlannerTest.java   |   604 -
 .../impl/HadoopWeightedPlannerMapReduceTest.java   |    38 -
 .../hadoop/impl/books/alice-in-wonderland.txt      |  3735 ------
 .../processors/hadoop/impl/books/art-of-war.txt    |  6982 ----------
 .../hadoop/impl/books/huckleberry-finn.txt         | 11733 -----------------
 .../hadoop/impl/books/sherlock-holmes.txt          | 13052 -------------------
 .../processors/hadoop/impl/books/tom-sawyer.txt    |  8858 -------------
 .../HadoopClientProtocolEmbeddedSelfTest.java      |    35 -
 ...adoopClientProtocolMultipleServersSelfTest.java |   315 -
 .../impl/client/HadoopClientProtocolSelfTest.java  |   666 -
 .../hadoop/impl/examples/HadoopPopularWords.java   |   298 -
 .../hadoop/impl/examples/HadoopWordCount1.java     |    94 -
 .../hadoop/impl/examples/HadoopWordCount1Map.java  |    79 -
 .../impl/examples/HadoopWordCount1Reduce.java      |    61 -
 .../hadoop/impl/examples/HadoopWordCount2.java     |   111 -
 .../impl/examples/HadoopWordCount2Combiner.java    |    45 -
 .../impl/examples/HadoopWordCount2Mapper.java      |    88 -
 .../impl/examples/HadoopWordCount2Reducer.java     |   113 -
 .../KerberosHadoopFileSystemFactorySelfTest.java   |   129 -
 .../hadoop/impl/igfs/Hadoop1DualAbstractTest.java  |   166 -
 .../impl/igfs/Hadoop1OverIgfsDualAsyncTest.java    |    32 -
 .../impl/igfs/Hadoop1OverIgfsDualSyncTest.java     |    32 -
 .../hadoop/impl/igfs/Hadoop1OverIgfsProxyTest.java |    69 -
 .../impl/igfs/HadoopFIleSystemFactorySelfTest.java |   350 -
 .../HadoopIgfs20FileSystemAbstractSelfTest.java    |  2109 ---
 ...oopIgfs20FileSystemLoopbackPrimarySelfTest.java |    86 -
 ...HadoopIgfs20FileSystemShmemPrimarySelfTest.java |    77 -
 .../impl/igfs/HadoopIgfsDualAbstractSelfTest.java  |   328 -
 .../impl/igfs/HadoopIgfsDualAsyncSelfTest.java     |    32 -
 .../impl/igfs/HadoopIgfsDualSyncSelfTest.java      |    32 -
 .../HadoopIgfsSecondaryFileSystemTestAdapter.java  |   152 -
 ...HadoopSecondaryFileSystemConfigurationTest.java |   602 -
 .../hadoop/impl/igfs/IgfsEventsTestSuite.java      |   307 -
 .../impl/igfs/IgfsNearOnlyMultiNodeSelfTest.java   |   216 -
 .../IgniteHadoopFileSystemAbstractSelfTest.java    |  2574 ----
 ...adoopFileSystemClientBasedAbstractSelfTest.java |   196 -
 ...doopFileSystemClientBasedDualAsyncSelfTest.java |    47 -
 ...adoopFileSystemClientBasedDualSyncSelfTest.java |    47 -
 .../IgniteHadoopFileSystemClientBasedOpenTest.java |   301 -
 ...HadoopFileSystemClientBasedPrimarySelfTest.java |    47 -
 ...teHadoopFileSystemClientBasedProxySelfTest.java |    46 -
 .../igfs/IgniteHadoopFileSystemClientSelfTest.java |   220 -
 .../IgniteHadoopFileSystemHandshakeSelfTest.java   |   283 -
 .../IgniteHadoopFileSystemIpcCacheSelfTest.java    |   215 -
 .../igfs/IgniteHadoopFileSystemLoggerSelfTest.java |   303 -
 .../IgniteHadoopFileSystemLoggerStateSelfTest.java |   341 -
 ...teHadoopFileSystemLoopbackAbstractSelfTest.java |    50 -
 ...ileSystemLoopbackEmbeddedDualAsyncSelfTest.java |    43 -
 ...FileSystemLoopbackEmbeddedDualSyncSelfTest.java |    43 -
 ...pFileSystemLoopbackEmbeddedPrimarySelfTest.java |    43 -
 ...ileSystemLoopbackEmbeddedSecondarySelfTest.java |    43 -
 ...ileSystemLoopbackExternalDualAsyncSelfTest.java |    33 -
 ...FileSystemLoopbackExternalDualSyncSelfTest.java |    33 -
 ...pFileSystemLoopbackExternalPrimarySelfTest.java |    33 -
 ...ileSystemLoopbackExternalSecondarySelfTest.java |    33 -
 ...emLoopbackExternalToClientAbstractSelfTest.java |    60 -
 ...mLoopbackExternalToClientDualAsyncSelfTest.java |    33 -
 ...emLoopbackExternalToClientDualSyncSelfTest.java |    33 -
 ...temLoopbackExternalToClientPrimarySelfTest.java |    33 -
 ...ystemLoopbackExternalToClientProxySelfTest.java |    33 -
 ...gniteHadoopFileSystemShmemAbstractSelfTest.java |    95 -
 ...opFileSystemShmemExternalDualAsyncSelfTest.java |    41 -
 ...oopFileSystemShmemExternalDualSyncSelfTest.java |    33 -
 ...doopFileSystemShmemExternalPrimarySelfTest.java |    33 -
 ...opFileSystemShmemExternalSecondarySelfTest.java |    33 -
 ...ystemShmemExternalToClientAbstractSelfTest.java |   107 -
 ...stemShmemExternalToClientDualAsyncSelfTest.java |    33 -
 ...ystemShmemExternalToClientDualSyncSelfTest.java |    33 -
 ...SystemShmemExternalToClientPrimarySelfTest.java |    33 -
 ...leSystemShmemExternalToClientProxySelfTest.java |    33 -
 .../shuffle/collections/HadoopAbstractMapTest.java |   187 -
 .../HadoopConcurrentHashMultimapSelftest.java      |   283 -
 .../shuffle/collections/HadoopHashMapSelfTest.java |   134 -
 .../collections/HadoopSkipListSelfTest.java        |   298 -
 .../shuffle/streams/HadoopDataStreamSelfTest.java  |   301 -
 .../taskexecutor/HadoopExecutorServiceTest.java    |    73 -
 .../HadoopExternalTaskExecutionSelfTest.java       |   237 -
 .../HadoopExternalCommunicationSelfTest.java       |   227 -
 .../impl/util/BasicUserNameMapperSelfTest.java     |   137 -
 .../impl/util/ChainedUserNameMapperSelfTest.java   |   112 -
 .../impl/util/KerberosUserNameMapperSelfTest.java  |   105 -
 .../hadoop/state/HadoopGroupingTestState.java      |    39 -
 .../state/HadoopJobTrackerSelfTestState.java       |    44 -
 .../HadoopMapReduceEmbeddedSelfTestState.java      |    31 -
 .../state/HadoopTaskExecutionSelfTestValues.java   |    50 -
 .../ignite/testsuites/IgniteHadoopTestSuite.java   |   379 -
 .../IgniteIgfsLinuxAndMacOSTestSuite.java          |    79 -
 .../cache/hibernate/HibernateCacheProxy.java       |    10 -
 .../ApiParity/IgniteConfigurationParityTest.cs     |     1 -
 .../java/org/apache/ignite/IgniteSpringBean.java   |    14 -
 .../spring/IgniteExcludeInConfigurationTest.java   |     5 +-
 .../org/apache/ignite/spring/sprint-exclude.xml    |    23 +-
 .../config/VisorConfigurationCommand.scala         |     7 +-
 .../visor/commands/open/VisorOpenCommand.scala     |     5 +-
 parent/pom.xml                                     |     8 -
 pom.xml                                            |     1 -
 696 files changed, 277 insertions(+), 164003 deletions(-)
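
For orientation, the bulk of what this patch deletes is the public IGFS API (IgniteFileSystem, IgfsPath, the org.apache.ignite.igfs.* packages) and the ignite-hadoop module built on top of it. Below is a minimal, illustrative sketch of how that API was typically used before 2.9; it is written against the removed classes listed above and no longer compiles once this commit is applied. The configuration path is an assumption borrowed from the removed config/hadoop/default-config.xml.

    // Illustrative sketch only: uses the pre-2.9 IGFS classes removed by this commit.
    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.igfs.IgfsOutputStream;
    import org.apache.ignite.igfs.IgfsPath;

    public class IgfsUsageSketch {
        public static void main(String[] args) throws Exception {
            // The XML is assumed to declare a FileSystemConfiguration named "igfs",
            // as the removed config/hadoop/default-config.xml did.
            try (Ignite ignite = Ignition.start("config/hadoop/default-config.xml")) {
                IgniteFileSystem fs = ignite.fileSystem("igfs");

                fs.mkdirs(new IgfsPath("/examples"));

                // Write a small file into the in-memory file system.
                try (IgfsOutputStream out = fs.create(new IgfsPath("/examples/hello.txt"), true)) {
                    out.write("hello".getBytes());
                }
            }
        }
    }

After this change, code of this kind has to be migrated off IGFS before moving to the 2.9 branch, since the classes above no longer exist.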

diff --git a/DEVNOTES.txt b/DEVNOTES.txt
index 7058620..a3b63bd 100644
--- a/DEVNOTES.txt
+++ b/DEVNOTES.txt
@@ -52,28 +52,6 @@ Apache Ignite with LGPL Maven Build Instructions
    Look for apache-ignite-lgpl-<version>-bin.zip in ./target/bin directory.
 
 
-Ignite Hadoop Accelerator Maven Build Instructions
-==================================================
-1) Compile and install:
-
-        mvn clean install -Pall-java,all-scala,licenses -DskipTests
-
-   Use 'hadoop.version' parameter to build Ignite against a specific Hadoop version.
-   Use 'spark.version' parameter to build ignite-spark module for a specific Spark version. Version should be >= 2.0.0.
-   For example:
-
-        mvn clean install -Pall-java,all-scala,licenses -DskipTests -Dhadoop.version=2.4.2 -Dspark.version=2.1.1
-
-2) Assembly Hadoop Accelerator:
-
-        mvn initialize -Prelease -Dignite.edition=apache-ignite-hadoop
-
-   Look for apache-ignite-hadoop-<version>-bin.zip in ./target/bin directory. Resulting binary
-   assembly will also include integration module for Apache Spark.
-
-   NOTE: JDK version should be 1.7.0-* or >= 1.8.0-u40.
-
-
 Ignite C++ Build Instructions
 =============================
 Instructions can be found at modules/platforms/cpp/DEVNOTES.txt.
@@ -165,10 +143,6 @@ Ignite Release Instructions
 
                 mvn initialize -Prelease
 
-   3.4) Assembly Hadoop Accelerator:
-
-                mvn initialize -Prelease -Dignite.edition=apache-ignite-hadoop
-
    NOTE: Nexus staging (repository.apache.org) should be closed with appropriate comment contains release version and
    release candidate number, for example "Apache Ignite 1.0.0-rc7", when mvn deploy finished.
 
diff --git a/assembly/dependencies-apache-ignite-hadoop.xml b/assembly/dependencies-apache-ignite-hadoop.xml
deleted file mode 100644
index 076f09c..0000000
--- a/assembly/dependencies-apache-ignite-hadoop.xml
+++ /dev/null
@@ -1,166 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
-          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2
-          http://maven.apache.org/xsd/assembly-1.1.2.xsd">
-    <id>dependencies</id>
-
-    <formats>
-        <format>dir</format>
-    </formats>
-
-    <includeBaseDirectory>false</includeBaseDirectory>
-
-    <moduleSets>
-        <moduleSet>
-            <includes>
-                <include>org.apache.ignite:ignite-spark</include>
-                <include>org.apache.ignite:ignite-spring</include>
-                <include>org.apache.ignite:ignite-log4j</include>
-                <include>org.apache.ignite:ignite-indexing</include>
-            </includes>
-            <sources>
-                <includeModuleDirectory>true</includeModuleDirectory>
-                <fileSets>
-                    <fileSet>
-                        <directory>${basedir}</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <includes>
-                            <include>README.txt</include>
-                            <include>licenses/**</include>
-                        </includes>
-                    </fileSet>
-                    <fileSet>
-                        <directory>${basedir}/target/licenses</directory>
-                        <outputDirectory>/licenses</outputDirectory>
-                    </fileSet>
-                    <fileSet>
-                        <directory>target/libs</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <excludes>
-                            <exclude>hadoop*.jar</exclude>
-                        </excludes>
-                    </fileSet>
-                    <fileSet>
-                        <directory>target</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <includes>
-                            <include>*.jar</include>
-                        </includes>
-                        <excludes>
-                            <exclude>*-tests.jar</exclude>
-                            <exclude>*-javadoc.jar</exclude>
-                            <exclude>*-sources.jar</exclude>
-                        </excludes>
-                    </fileSet>
-                </fileSets>
-            </sources>
-        </moduleSet>
-
-        <moduleSet>
-            <includes>
-                <include>org.apache.ignite:ignite-core</include>
-            </includes>
-            <sources>
-                <includeModuleDirectory>false</includeModuleDirectory>
-                <fileSets>
-                    <fileSet>
-                        <directory>${basedir}</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <includes>
-                            <include>licenses/**</include>
-                        </includes>
-                    </fileSet>
-                    <fileSet>
-                        <directory>${basedir}/target/licenses</directory>
-                        <outputDirectory>/licenses</outputDirectory>
-                    </fileSet>
-                    <fileSet>
-                        <directory>target/libs</directory>
-                        <outputDirectory>/</outputDirectory>
-                    </fileSet>
-                    <fileSet>
-                        <directory>target</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <includes>
-                            <include>*.jar</include>
-                        </includes>
-                        <excludes>
-                            <exclude>*-tests.jar</exclude>
-                            <exclude>*-javadoc.jar</exclude>
-                            <exclude>*-sources.jar</exclude>
-                        </excludes>
-                    </fileSet>
-                </fileSets>
-            </sources>
-        </moduleSet>
-
-        <moduleSet>
-            <includes>
-                <include>org.apache.ignite:ignite-hadoop</include>
-            </includes>
-            <sources>
-                <includeModuleDirectory>true</includeModuleDirectory>
-                <fileSets>
-                    <fileSet>
-                        <directory>${basedir}</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <includes>
-                            <include>README.txt</include>
-                            <include>licenses/**</include>
-                        </includes>
-                    </fileSet>
-                    <fileSet>
-                        <directory>${basedir}/target/licenses</directory>
-                        <outputDirectory>/licenses</outputDirectory>
-                    </fileSet>
-                    <fileSet>
-                        <directory>target/libs</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <excludes>
-                            <exclude>hadoop*.jar</exclude>
-                            <exclude>log4j*.jar</exclude>
-                        </excludes>
-                    </fileSet>
-                    <fileSet>
-                        <directory>target</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <includes>
-                            <include>*.jar</include>
-                        </includes>
-                        <excludes>
-                            <exclude>*-tests.jar</exclude>
-                            <exclude>*-javadoc.jar</exclude>
-                            <exclude>*-sources.jar</exclude>
-                        </excludes>
-                    </fileSet>
-                </fileSets>
-            </sources>
-        </moduleSet>
-    </moduleSets>
-
-    <fileSets>
-        <fileSet>
-            <directory>modules/core/licenses</directory>
-            <outputDirectory>/licenses</outputDirectory>
-        </fileSet>
-    </fileSets>
-</assembly>
diff --git a/assembly/dependencies-apache-ignite-lgpl.xml b/assembly/dependencies-apache-ignite-lgpl.xml
index 7337210..2887c50 100644
--- a/assembly/dependencies-apache-ignite-lgpl.xml
+++ b/assembly/dependencies-apache-ignite-lgpl.xml
@@ -124,7 +124,6 @@
                 <exclude>org.apache.ignite:ignite-visor-console</exclude>
                 <exclude>org.apache.ignite:ignite-visor-console_2.10</exclude>
                 <exclude>org.apache.ignite:ignite-visor-plugins</exclude>
-                <exclude>org.apache.ignite:ignite-hadoop</exclude>
                 <exclude>org.apache.ignite:ignite-codegen</exclude>
                 <exclude>org.apache.ignite:ignite-apache-license-gen</exclude>
                 <exclude>org.apache.ignite:ignite-appserver-test</exclude>
diff --git a/assembly/dependencies-apache-ignite.xml b/assembly/dependencies-apache-ignite.xml
index 029f7ab..0a7a68b 100644
--- a/assembly/dependencies-apache-ignite.xml
+++ b/assembly/dependencies-apache-ignite.xml
@@ -124,7 +124,6 @@
                 <exclude>org.apache.ignite:ignite-visor-console</exclude>
                 <exclude>org.apache.ignite:ignite-visor-console_2.10</exclude>
                 <exclude>org.apache.ignite:ignite-visor-plugins</exclude>
-                <exclude>org.apache.ignite:ignite-hadoop</exclude>
                 <exclude>org.apache.ignite:ignite-codegen</exclude>
                 <exclude>org.apache.ignite:ignite-apache-license-gen</exclude>
                 <exclude>org.apache.ignite:ignite-hibernate-core</exclude>
diff --git a/assembly/libs/README.txt b/assembly/libs/README.txt
index fc40956..3d45207 100644
--- a/assembly/libs/README.txt
+++ b/assembly/libs/README.txt
@@ -79,7 +79,6 @@ The following modules are available:
 - ignite-flink (for streaming from Apache Flink into Ignite)
 - ignite-flume (for streaming events from Apache Flume into Ignite)
 - ignite-gce (for automatic cluster discovery on Google Compute Engine)
-- ignite-hadoop (for Apache Hadoop Accelerator)
 - ignite-hibernate (for Hibernate integration)
 - ignite-hibernate5 (for Hibernate5 integration)
 - ignite-indexing (for SQL querying and indexing)
diff --git a/bin/ignite.sh b/bin/ignite.sh
index 46c6367..265b214 100755
--- a/bin/ignite.sh
+++ b/bin/ignite.sh
@@ -133,15 +133,6 @@ if [ "${ENABLE_ASSERTIONS}" = "1" ]; then
 fi
 
 #
-# If this is a Hadoop edition, and HADOOP_HOME set, add the native library location:
-#
-if [ -d "${IGNITE_HOME}/libs/ignite-hadoop/" ] && [ -n "${HADOOP_HOME}" ] && [ -d "${HADOOP_HOME}/lib/native/" ]; then
-   if [[ "${JVM_OPTS}${JVM_XOPTS}" != *-Djava.library.path=* ]]; then
-      JVM_OPTS="${JVM_OPTS} -Djava.library.path=${HADOOP_HOME}/lib/native/"
-   fi
-fi
-
-#
 # Set main class to start service (grid node by default).
 #
 if [ "${MAIN_CLASS:-}" = "" ]; then
diff --git a/config/hadoop/default-config.xml b/config/hadoop/default-config.xml
deleted file mode 100644
index 83d3679..0000000
--- a/config/hadoop/default-config.xml
+++ /dev/null
@@ -1,120 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    Ignite Spring configuration file.
-
-    When starting a standalone Ignite node, you need to execute the following command:
-    {IGNITE_HOME}/bin/ignite.{bat|sh} path-to-this-file/default-config.xml
-
-    When starting Ignite from Java IDE, pass path to this file into Ignition:
-    Ignition.start("path-to-this-file/default-config.xml");
--->
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:util="http://www.springframework.org/schema/util"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans
-       http://www.springframework.org/schema/beans/spring-beans.xsd
-       http://www.springframework.org/schema/util
-       http://www.springframework.org/schema/util/spring-util.xsd">
-
-    <!--
-        Optional description.
-    -->
-    <description>
-        Spring file for Ignite node configuration with IGFS and Apache Hadoop map-reduce support enabled.
-        Ignite node will start with this configuration by default.
-    </description>
-
-    <!--
-        Initialize property configurer so we can reference environment variables.
-    -->
-    <bean id="propertyConfigurer" class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
-        <property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_FALLBACK"/>
-        <property name="searchSystemEnvironment" value="true"/>
-    </bean>
-
-    <!--
-        Configuration of Ignite node.
-    -->
-    <bean id="grid.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-        <!--
-            This port will be used by Apache Hadoop client to connect to Ignite node as if it was a job tracker.
-        -->
-        <property name="connectorConfiguration">
-            <bean class="org.apache.ignite.configuration.ConnectorConfiguration">
-                <property name="port" value="11211"/>
-            </bean>
-        </property>
-
-        <!--
-            Configure one IGFS file system instance named "igfs" on this node.
-        -->
-        <property name="fileSystemConfiguration">
-            <list>
-                <bean class="org.apache.ignite.configuration.FileSystemConfiguration">
-                    <!-- IGFS name you will use to access IGFS through Hadoop API. -->
-                    <property name="name" value="igfs"/>
-
-                    <!-- Configure TCP endpoint for communication with the file system instance. -->
-                    <property name="ipcEndpointConfiguration">
-                        <bean class="org.apache.ignite.igfs.IgfsIpcEndpointConfiguration">
-                            <property name="type" value="TCP" />
-                            <property name="host" value="0.0.0.0" />
-                            <property name="port" value="10500" />
-                        </bean>
-                    </property>
-
-                    <!--
-                        Configure secondary file system if needed.
-                    -->
-                    <!--
-                    <property name="secondaryFileSystem">
-                        <bean class="org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem">
-                            <property name="fileSystemFactory">
-                                <bean class="org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory">
-                                    <property name="uri" value="hdfs://your_hdfs_host:9000"/>
-                                </bean>
-                            </property>
-                        </bean>
-                    </property>
-                    -->
-                </bean>
-            </list>
-        </property>
-
-        <!--
-            TCP discovery SPI can be configured with list of addresses if multicast is not available.
-        -->
-        <!--
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
-                        <property name="addresses">
-                            <list>
-                                <value>127.0.0.1:47500..47509</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-        -->
-    </bean>
-</beans>
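
For reference, the removed config/hadoop/default-config.xml above maps onto a small amount of
programmatic configuration on pre-2.9 versions. The following is a minimal sketch only: the class
name is illustrative, and the ConnectorConfiguration/IgfsIpcEndpointConfiguration setters are
assumed to match the XML property names shown above.

    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.ConnectorConfiguration;
    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
    import org.apache.ignite.igfs.IgfsIpcEndpointType;

    /** Illustrative equivalent of the removed config/hadoop/default-config.xml. */
    public class HadoopDefaultConfigSketch {
        public static void main(String[] args) {
            IgniteConfiguration cfg = new IgniteConfiguration();

            // Port used by the Hadoop client to connect to the node as if it were a job tracker.
            ConnectorConfiguration connCfg = new ConnectorConfiguration();
            connCfg.setPort(11211);
            cfg.setConnectorConfiguration(connCfg);

            // One IGFS instance named "igfs" with a TCP IPC endpoint on port 10500.
            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
            igfsCfg.setName("igfs");

            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
            endpointCfg.setType(IgfsIpcEndpointType.TCP); // assumed enum constant for the "TCP" value above
            endpointCfg.setHost("0.0.0.0");
            endpointCfg.setPort(10500);

            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
            cfg.setFileSystemConfiguration(igfsCfg);

            Ignition.start(cfg);
        }
    }
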
diff --git a/examples/config/filesystem/README.txt b/examples/config/filesystem/README.txt
deleted file mode 100644
index 4f6ae88..0000000
--- a/examples/config/filesystem/README.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-FileSystem Configuration Example
---------------------------------
-
-This folder contains configuration files for the IGFS examples located in
-the org.apache.ignite.examples.igfs package.
-
-- example-igfs.xml file is used to start Apache Ignite nodes with IGFS configured
-- core-site.xml file is used to run the Hadoop FS driver over IGFS
diff --git a/examples/config/filesystem/core-site.xml b/examples/config/filesystem/core-site.xml
deleted file mode 100644
index b6f0291..0000000
--- a/examples/config/filesystem/core-site.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    Example configuration of the Hadoop FS driver over Ignite FS API.
-    Copy this file into '$HADOOP_HOME/conf/core-site.xml'.
--->
-<configuration>
-    <property>
-        <name>fs.default.name</name>
-        <value>igfs:///</value>
-    </property>
-
-    <property>
-        <!-- FS driver class for the 'igfs://' URIs. -->
-        <name>fs.igfs.impl</name>
-        <value>org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem</value>
-    </property>
-
-    <property>
-        <!-- FS driver class for the 'igfs://' URIs in Hadoop2.x -->
-        <name>fs.AbstractFileSystem.igfs.impl</name>
-        <value>org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem</value>
-    </property>
-</configuration>
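
The removed core-site.xml above wires the 'igfs://' scheme to the Ignite Hadoop file system drivers.
Below is a minimal client-side sketch of the same mapping, assuming a standard Hadoop client classpath
(the org.apache.hadoop classes are not part of this repository) and a running IGFS endpoint; the class
name is illustrative.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Illustrative Hadoop client that resolves igfs:// URIs through the drivers listed above. */
    public class IgfsHadoopClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // Same mappings as the removed core-site.xml.
            conf.set("fs.default.name", "igfs:///");
            conf.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
            conf.set("fs.AbstractFileSystem.igfs.impl", "org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem");

            // List the IGFS root directory through the standard Hadoop FileSystem API.
            try (FileSystem fs = FileSystem.get(URI.create("igfs:///"), conf)) {
                for (FileStatus status : fs.listStatus(new Path("/")))
                    System.out.println(status.getPath());
            }
        }
    }
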
diff --git a/examples/config/filesystem/example-igfs.xml b/examples/config/filesystem/example-igfs.xml
deleted file mode 100644
index 9e45450..0000000
--- a/examples/config/filesystem/example-igfs.xml
+++ /dev/null
@@ -1,118 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    Ignite Spring configuration file to start up an Ignite cache.
-
-    When starting a standalone node, you need to execute the following command:
-    {IGNITE_HOME}/bin/ignite.{bat|sh} examples/config/filesystem/example-igfs.xml
-
-    When starting Ignite from Java IDE, pass path to this file into Ignition:
-    Ignition.start("examples/config/filesystem/example-igfs.xml");
--->
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans
-       http://www.springframework.org/schema/beans/spring-beans.xsd">
-
-    <!--
-        Optional description.
-    -->
-    <description>
-        Spring file for Ignite configuration with client-available endpoints.
-    </description>
-
-    <!--
-        Initialize property configurer so we can reference environment variables.
-    -->
-    <bean id="propertyConfigurer" class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
-        <property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_FALLBACK"/>
-        <property name="searchSystemEnvironment" value="true"/>
-    </bean>
-
-    <!--
-        The configuration below demonstrates how to set up an IGFS node with file data.
-    -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-        <property name="marshaller">
-            <bean class="org.apache.ignite.internal.binary.BinaryMarshaller" />
-        </property>
-
-        <property name="fileSystemConfiguration">
-            <list>
-                <bean class="org.apache.ignite.configuration.FileSystemConfiguration">
-                    <property name="name" value="igfs"/>
-
-                    <!-- Must correlate with cache affinity mapper. -->
-                    <property name="blockSize" value="#{128 * 1024}"/>
-                    <property name="perNodeBatchSize" value="512"/>
-                    <property name="perNodeParallelBatchCount" value="16"/>
-
-                    <!-- Set number of prefetch blocks. -->
-                    <property name="prefetchBlocks" value="32"/>
-
-                    <!--
-                        Example of configured IPC loopback endpoint.
-                    -->
-                    <!--
-                    <property name="ipcEndpointConfiguration">
-                        <bean class="org.apache.ignite.igfs.IgfsIpcEndpointConfiguration">
-                            <property name="type" value="TCP" />
-                        </bean>
-                    </property>
-                    -->
-
-                    <!--
-                        Example of configured shared memory endpoint.
-                    -->
-                    <!--
-                    <property name="ipcEndpointConfiguration">
-                        <bean class="org.apache.ignite.igfs.IgfsIpcEndpointConfiguration">
-                            <property name="type" value="SHMEM" />
-                        </bean>
-                    </property>
-                    -->
-                </bean>
-            </list>
-        </property>
-
-        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <!--
-                        Ignition provides several options for automatic discovery that can be used
-                        instead of static IP-based discovery. For information on all options refer
-                        to our documentation: http://apacheignite.readme.io/docs/cluster-config
-                    -->
-                    <!-- Uncomment static IP finder to enable static-based discovery of initial nodes. -->
-                    <!--<bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">-->
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">
-                        <property name="addresses">
-                            <list>
-                                <!-- In distributed environment, replace with actual host IP address. -->
-                                <value>127.0.0.1:47500..47509</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
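
The data-related settings in the removed example-igfs.xml above (block size, per-node batching and
prefetching) also had programmatic counterparts. A minimal sketch, assuming pre-2.9 setters named
after the XML properties shown above (setBlockSize, setPerNodeBatchSize, setPerNodeParallelBatchCount,
setPrefetchBlocks); the class name is illustrative.

    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;

    /** Illustrative equivalent of the IGFS tuning from the removed example-igfs.xml. */
    public class ExampleIgfsConfigSketch {
        public static void main(String[] args) {
            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();

            igfsCfg.setName("igfs");
            igfsCfg.setBlockSize(128 * 1024);         // must correlate with the cache affinity mapper
            igfsCfg.setPerNodeBatchSize(512);
            igfsCfg.setPerNodeParallelBatchCount(16);
            igfsCfg.setPrefetchBlocks(32);            // number of prefetch blocks

            IgniteConfiguration cfg = new IgniteConfiguration();
            cfg.setFileSystemConfiguration(igfsCfg);

            Ignition.start(cfg);
        }
    }
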
diff --git a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsExample.java b/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsExample.java
deleted file mode 100644
index e2ef3f2..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsExample.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.igfs;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Collection;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.igfs.IgfsException;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.igfs.IgfsPath;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Example that shows usage of the {@link org.apache.ignite.IgniteFileSystem} API. It starts a node with {@code IgniteFs}
- * configured and performs several file system operations (create, write, append, read and delete
- * files; create, list and delete directories).
- * <p>
- * Remote nodes should always be started with configuration file which includes
- * IGFS: {@code 'ignite.sh examples/config/filesystem/example-igfs.xml'}.
- * <p>
- * Alternatively you can run {@link IgfsNodeStartup} in another JVM which will start
- * node with {@code examples/config/filesystem/example-igfs.xml} configuration.
- */
-public final class IgfsExample {
-    /**
-     * Executes example.
-     *
-     * @param args Command line arguments, none required.
-     * @throws Exception If example execution failed.
-     */
-    public static void main(String[] args) throws Exception {
-        Ignite ignite = Ignition.start("examples/config/filesystem/example-igfs.xml");
-
-        System.out.println();
-        System.out.println(">>> IGFS example started.");
-
-        try {
-            // Get an instance of Ignite File System.
-            IgniteFileSystem fs = ignite.fileSystem("igfs");
-
-            // Working directory path.
-            IgfsPath workDir = new IgfsPath("/examples/fs");
-
-            // Cleanup working directory.
-            delete(fs, workDir);
-
-            // Create empty working directory.
-            mkdirs(fs, workDir);
-
-            // Print information for working directory.
-            printInfo(fs, workDir);
-
-            // File path.
-            IgfsPath filePath = new IgfsPath(workDir, "file.txt");
-
-            // Create file.
-            create(fs, filePath, new byte[] {1, 2, 3});
-
-            // Print information for file.
-            printInfo(fs, filePath);
-
-            // Append more data to previously created file.
-            append(fs, filePath, new byte[] {4, 5});
-
-            // Print information for file.
-            printInfo(fs, filePath);
-
-            // Read data from file.
-            read(fs, filePath);
-
-            // Delete file.
-            delete(fs, filePath);
-
-            // Print information for file.
-            printInfo(fs, filePath);
-
-            // Create several files.
-            for (int i = 0; i < 5; i++)
-                create(fs, new IgfsPath(workDir, "file-" + i + ".txt"), null);
-
-            list(fs, workDir);
-        }
-        finally {
-            Ignition.stop(false);
-        }
-    }
-
-    /**
-     * Deletes file or directory. If directory
-     * is not empty, it's deleted recursively.
-     *
-     * @param fs IGFS.
-     * @param path File or directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void delete(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
-        assert fs != null;
-        assert path != null;
-
-        if (fs.exists(path)) {
-            boolean isFile = fs.info(path).isFile();
-
-            try {
-                fs.delete(path, true);
-
-                System.out.println();
-                System.out.println(">>> Deleted " + (isFile ? "file" : "directory") + ": " + path);
-            }
-            catch (IgfsException e) {
-                System.out.println();
-                System.out.println(">>> Failed to delete " + (isFile ? "file" : "directory") + " [path=" + path +
-                    ", msg=" + e.getMessage() + ']');
-            }
-        }
-        else {
-            System.out.println();
-            System.out.println(">>> Won't delete file or directory (doesn't exist): " + path);
-        }
-    }
-
-    /**
-     * Creates directories.
-     *
-     * @param fs IGFS.
-     * @param path Directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void mkdirs(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
-        assert fs != null;
-        assert path != null;
-
-        try {
-            fs.mkdirs(path);
-
-            System.out.println();
-            System.out.println(">>> Created directory: " + path);
-        }
-        catch (IgfsException e) {
-            System.out.println();
-            System.out.println(">>> Failed to create a directory [path=" + path + ", msg=" + e.getMessage() + ']');
-        }
-
-        System.out.println();
-    }
-
-    /**
-     * Creates file and writes provided data to it.
-     *
-     * @param fs IGFS.
-     * @param path File path.
-     * @param data Data.
-     * @throws IgniteException If file can't be created.
-     * @throws IOException If data can't be written.
-     */
-    private static void create(IgniteFileSystem fs, IgfsPath path, @Nullable byte[] data)
-        throws IgniteException, IOException {
-        assert fs != null;
-        assert path != null;
-
-        try (OutputStream out = fs.create(path, true)) {
-            System.out.println();
-            System.out.println(">>> Created file: " + path);
-
-            if (data != null) {
-                out.write(data);
-
-                System.out.println();
-                System.out.println(">>> Wrote data to file: " + path);
-            }
-        }
-
-        System.out.println();
-    }
-
-    /**
-     * Opens file and appends provided data to it.
-     *
-     * @param fs IGFS.
-     * @param path File path.
-     * @param data Data.
-     * @throws IgniteException If file can't be created.
-     * @throws IOException If data can't be written.
-     */
-    private static void append(IgniteFileSystem fs, IgfsPath path, byte[] data) throws IgniteException, IOException {
-        assert fs != null;
-        assert path != null;
-        assert data != null;
-        assert fs.info(path).isFile();
-
-        try (OutputStream out = fs.append(path, true)) {
-            System.out.println();
-            System.out.println(">>> Opened file: " + path);
-
-            out.write(data);
-        }
-
-        System.out.println();
-        System.out.println(">>> Appended data to file: " + path);
-    }
-
-    /**
-     * Opens file and reads it to byte array.
-     *
-     * @param fs IgniteFs.
-     * @param path File path.
-     * @throws IgniteException If file can't be opened.
-     * @throws IOException If data can't be read.
-     */
-    private static void read(IgniteFileSystem fs, IgfsPath path) throws IgniteException, IOException {
-        assert fs != null;
-        assert path != null;
-        assert fs.info(path).isFile();
-
-        byte[] data = new byte[(int)fs.info(path).length()];
-
-        try (IgfsInputStream in = fs.open(path)) {
-            in.read(data);
-        }
-
-        System.out.println();
-        System.out.println(">>> Read data from " + path + ": " + Arrays.toString(data));
-    }
-
-    /**
-     * Lists files in directory.
-     *
-     * @param fs IGFS.
-     * @param path Directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void list(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
-        assert fs != null;
-        assert path != null;
-        assert fs.info(path).isDirectory();
-
-        Collection<IgfsPath> files = fs.listPaths(path);
-
-        if (files.isEmpty()) {
-            System.out.println();
-            System.out.println(">>> No files in directory: " + path);
-        }
-        else {
-            System.out.println();
-            System.out.println(">>> List of files in directory: " + path);
-
-            for (IgfsPath f : files)
-                System.out.println(">>>     " + f.name());
-        }
-
-        System.out.println();
-    }
-
-    /**
-     * Prints information for file or directory.
-     *
-     * @param fs IGFS.
-     * @param path File or directory path.
-     * @throws IgniteException In case of error.
-     */
-    private static void printInfo(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
-        System.out.println();
-        System.out.println("Information for " + path + ": " + fs.info(path));
-    }
-}
diff --git a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsMapReduceExample.java b/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsMapReduceExample.java
deleted file mode 100644
index 9365582..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsMapReduceExample.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.igfs;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.List;
-import java.util.TreeSet;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.compute.ComputeJobResult;
-import org.apache.ignite.igfs.IgfsOutputStream;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.mapreduce.IgfsFileRange;
-import org.apache.ignite.igfs.mapreduce.IgfsInputStreamJobAdapter;
-import org.apache.ignite.igfs.mapreduce.IgfsJob;
-import org.apache.ignite.igfs.mapreduce.IgfsRangeInputStream;
-import org.apache.ignite.igfs.mapreduce.IgfsTask;
-import org.apache.ignite.igfs.mapreduce.IgfsTaskArgs;
-import org.apache.ignite.igfs.mapreduce.records.IgfsNewLineRecordResolver;
-
-/**
- * Example that shows how to use {@link org.apache.ignite.igfs.mapreduce.IgfsTask} to find lines matching a particular
- * pattern in a file, in much the same way as the {@code grep} command does.
- * <p>
- * Remote nodes should always be started with configuration file which includes
- * IGFS: {@code 'ignite.sh examples/config/filesystem/example-igfs.xml'}.
- * <p>
- * Alternatively you can run {@link IgfsNodeStartup} in another JVM which will start
- * node with {@code examples/config/filesystem/example-igfs.xml} configuration.
- */
-public class IgfsMapReduceExample {
-    /**
-     * Executes example.
-     *
-     * @param args Command line arguments. First argument is file name, second argument is regex to look for.
-     * @throws Exception If failed.
-     */
-    public static void main(String[] args) throws Exception {
-        if (args.length == 0)
-            System.out.println("Please provide file name and regular expression.");
-        else if (args.length == 1)
-            System.out.println("Please provide regular expression.");
-        else {
-            try (Ignite ignite = Ignition.start("examples/config/filesystem/example-igfs.xml")) {
-                System.out.println();
-                System.out.println(">>> IGFS map reduce example started.");
-
-                // Prepare arguments.
-                String fileName = args[0];
-
-                File file = new File(fileName);
-
-                String regexStr = args[1];
-
-                // Get an instance of Ignite File System.
-                IgniteFileSystem fs = ignite.fileSystem("igfs");
-
-                // Working directory path.
-                IgfsPath workDir = new IgfsPath("/examples/fs");
-
-                // Write file to IGFS.
-                IgfsPath fsPath = new IgfsPath(workDir, file.getName());
-
-                writeFile(fs, fsPath, file);
-
-                Collection<Line> lines = fs.execute(new GrepTask(), IgfsNewLineRecordResolver.NEW_LINE,
-                    Collections.singleton(fsPath), regexStr);
-
-                if (lines.isEmpty()) {
-                    System.out.println();
-                    System.out.println("No lines were found.");
-                }
-                else {
-                    System.out.println();
-                    System.out.println("Found lines:");
-
-                    for (Line line : lines)
-                        print(line.fileLine());
-                }
-            }
-        }
-    }
-
-    /**
-     * Write file to the Ignite file system.
-     *
-     * @param fs Ignite file system.
-     * @param fsPath Ignite file system path.
-     * @param file File to write.
-     * @throws Exception In case of exception.
-     */
-    private static void writeFile(IgniteFileSystem fs, IgfsPath fsPath, File file) throws Exception {
-        System.out.println();
-        System.out.println("Copying file to IGFS: " + file);
-
-        try (
-            IgfsOutputStream os = fs.create(fsPath, true);
-            FileInputStream fis = new FileInputStream(file)
-        ) {
-            byte[] buf = new byte[2048];
-
-            int read = fis.read(buf);
-
-            while (read != -1) {
-                os.write(buf, 0, read);
-
-                read = fis.read(buf);
-            }
-        }
-    }
-
-    /**
-     * Print particular string.
-     *
-     * @param str String.
-     */
-    private static void print(String str) {
-        System.out.println(">>> " + str);
-    }
-
-    /**
-     * Grep task.
-     */
-    private static class GrepTask extends IgfsTask<String, Collection<Line>> {
-        /** {@inheritDoc} */
-        @Override public IgfsJob createJob(IgfsPath path, IgfsFileRange range,
-            IgfsTaskArgs<String> args) {
-            return new GrepJob(args.userArgument());
-        }
-
-        /** {@inheritDoc} */
-        @Override public Collection<Line> reduce(List<ComputeJobResult> results) {
-            Collection<Line> lines = new TreeSet<>(new Comparator<Line>() {
-                @Override public int compare(Line line1, Line line2) {
-                    return line1.rangePosition() < line2.rangePosition() ? -1 :
-                        line1.rangePosition() > line2.rangePosition() ? 1 : line1.lineIndex() - line2.lineIndex();
-                }
-            });
-
-            for (ComputeJobResult res : results) {
-                if (res.getException() != null)
-                    throw res.getException();
-
-                Collection<Line> line = res.getData();
-
-                if (line != null)
-                    lines.addAll(line);
-            }
-
-            return lines;
-        }
-    }
-
-    /**
-     * Grep job.
-     */
-    private static class GrepJob extends IgfsInputStreamJobAdapter {
-        /** Regex string. */
-        private final String regex;
-
-        /**
-         * Constructor.
-         *
-         * @param regex Regex string.
-         */
-        private GrepJob(String regex) {
-            this.regex = regex;
-        }
-
-        /**  {@inheritDoc} */
-        @Override public Object execute(IgniteFileSystem igfs, IgfsRangeInputStream in) throws IgniteException, IOException {
-            Collection<Line> res = null;
-
-            long start = in.startOffset();
-
-            try (BufferedReader br = new BufferedReader(new InputStreamReader(in))) {
-                int ctr = 0;
-
-                String line = br.readLine();
-
-                while (line != null) {
-                    if (line.matches(".*" + regex + ".*")) {
-                        if (res == null)
-                            res = new HashSet<>();
-
-                        res.add(new Line(start, ctr++, line));
-                    }
-
-                    line = br.readLine();
-                }
-            }
-
-            return res;
-        }
-    }
-
-    /**
-     * Single file line with its position.
-     */
-    private static class Line {
-        /** Line start position in the file. */
-        private long rangePos;
-
-        /** Matching line index within the range. */
-        private final int lineIdx;
-
-        /** File line. */
-        private String line;
-
-        /**
-         * Constructor.
-         *
-         * @param rangePos Range position.
-         * @param lineIdx Matching line index within the range.
-         * @param line File line.
-         */
-        private Line(long rangePos, int lineIdx, String line) {
-            this.rangePos = rangePos;
-            this.lineIdx = lineIdx;
-            this.line = line;
-        }
-
-        /**
-         * @return Range position.
-         */
-        public long rangePosition() {
-            return rangePos;
-        }
-
-        /**
-         * @return Matching line index within the range.
-         */
-        public int lineIndex() {
-            return lineIdx;
-        }
-
-        /**
-         * @return File line.
-         */
-        public String fileLine() {
-            return line;
-        }
-    }
-}
diff --git a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsNodeStartup.java b/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsNodeStartup.java
deleted file mode 100644
index 0540e6f..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsNodeStartup.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.igfs;
-
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.Ignition;
-
-/**
- * Starts up an empty node with IGFS configuration.
- * You can also start a stand-alone Ignite instance by passing the path
- * to the configuration file to the {@code 'ignite.{sh|bat}'} script, like so:
- * {@code 'ignite.sh examples/config/filesystem/example-igfs.xml'}.
- * <p>
- * The difference is that running this class from an IDE adds all example classes to the classpath,
- * but running from the command line does not.
- */
-public class IgfsNodeStartup {
-    /**
-     * Start up an empty node with specified cache configuration.
-     *
-     * @param args Command line arguments, none required.
-     * @throws IgniteException If example execution failed.
-     */
-    public static void main(String[] args) throws IgniteException {
-        Ignition.start("examples/config/filesystem/example-igfs.xml");
-    }
-}
diff --git a/examples/src/main/java/org/apache/ignite/examples/igfs/package-info.java b/examples/src/main/java/org/apache/ignite/examples/igfs/package-info.java
deleted file mode 100644
index f3e8fc9..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/igfs/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Demonstrates usage of Ignite File System.
- */
-
-package org.apache.ignite.examples.igfs;
diff --git a/examples/src/main/java/org/apache/ignite/examples/misc/client/memcache/MemcacheRestExampleNodeStartup.java b/examples/src/main/java/org/apache/ignite/examples/misc/client/memcache/MemcacheRestExampleNodeStartup.java
index fd806bd..9d5dce4 100644
--- a/examples/src/main/java/org/apache/ignite/examples/misc/client/memcache/MemcacheRestExampleNodeStartup.java
+++ b/examples/src/main/java/org/apache/ignite/examples/misc/client/memcache/MemcacheRestExampleNodeStartup.java
@@ -49,7 +49,7 @@ public class MemcacheRestExampleNodeStartup {
     }
 
     /**
-     * Create Ignite configuration with IGFS and enabled IPC.
+     * Create Ignite configuration with IPC enabled.
      *
      * @return Ignite configuration.
      * @throws IgniteException If configuration creation failed.
diff --git a/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java b/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java
deleted file mode 100644
index 76cb5ad..0000000
--- a/examples/src/test/java/org/apache/ignite/examples/IgfsExamplesSelfTest.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples;
-
-import org.apache.ignite.examples.igfs.IgfsExample;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.testframework.junits.common.GridAbstractExamplesTest;
-import org.junit.Test;
-
-/**
- * IGFS examples self test.
- */
-public class IgfsExamplesSelfTest extends GridAbstractExamplesTest {
-    /** IGFS config with shared memory IPC. */
-    private static final String IGFS_SHMEM_CFG = "modules/core/src/test/config/igfs-shmem.xml";
-
-    /** IGFS config with loopback IPC. */
-    private static final String IGFS_LOOPBACK_CFG = "modules/core/src/test/config/igfs-loopback.xml";
-
-    /**
-     * @throws Exception If failed.
-     */
-    @Test
-    public void testIgniteFsApiExample() throws Exception {
-        String cfgPath = U.isWindows() ? IGFS_LOOPBACK_CFG : IGFS_SHMEM_CFG;
-
-        try {
-            startGrid("test1", cfgPath);
-            startGrid("test2", cfgPath);
-            startGrid("test3", cfgPath);
-
-            IgfsExample.main(EMPTY_ARGS);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-}
diff --git a/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesSelfTestSuite.java b/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesSelfTestSuite.java
index 116d142..8295b94 100644
--- a/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesSelfTestSuite.java
+++ b/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesSelfTestSuite.java
@@ -35,7 +35,6 @@ import org.apache.ignite.examples.DeploymentExamplesSelfTest;
 import org.apache.ignite.examples.EncryptedCacheExampleSelfTest;
 import org.apache.ignite.examples.EventsExamplesMultiNodeSelfTest;
 import org.apache.ignite.examples.EventsExamplesSelfTest;
-import org.apache.ignite.examples.IgfsExamplesSelfTest;
 import org.apache.ignite.examples.LifecycleExamplesSelfTest;
 import org.apache.ignite.examples.MemcacheRestExamplesMultiNodeSelfTest;
 import org.apache.ignite.examples.MemcacheRestExamplesSelfTest;
@@ -72,7 +71,6 @@ import org.junit.runners.Suite;
     TaskExamplesSelfTest.class,
     SpringBeanExamplesSelfTest.class,
     SpringDataExampleSelfTest.class,
-    IgfsExamplesSelfTest.class,
     CheckpointExamplesSelfTest.class,
     ClusterGroupExampleSelfTest.class,
     CacheContinuousQueryExamplesSelfTest.class,
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java
index 1f30972..2e9742d 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java
@@ -58,9 +58,7 @@ import org.apache.ignite.cluster.ClusterState;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.DataRegionConfiguration;
 import org.apache.ignite.configuration.DataStorageConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
 import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlIndexMetadata;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlMetadata;
@@ -104,16 +102,6 @@ import org.apache.ignite.internal.visor.file.VisorFileBlockTask;
 import org.apache.ignite.internal.visor.file.VisorFileBlockTaskArg;
 import org.apache.ignite.internal.visor.file.VisorLatestTextFilesTask;
 import org.apache.ignite.internal.visor.file.VisorLatestTextFilesTaskArg;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsFormatTask;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsFormatTaskArg;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsProfilerClearTask;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsProfilerClearTaskArg;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsProfilerTask;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsProfilerTaskArg;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsResetMetricsTask;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsResetMetricsTaskArg;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsSamplingStateTask;
-import org.apache.ignite.internal.visor.igfs.VisorIgfsSamplingStateTaskArg;
 import org.apache.ignite.internal.visor.log.VisorLogSearchTask;
 import org.apache.ignite.internal.visor.log.VisorLogSearchTaskArg;
 import org.apache.ignite.internal.visor.misc.VisorAckTask;
@@ -2125,47 +2113,6 @@ public abstract class JettyRestProcessorAbstractSelfTest extends JettyRestProces
 
         jsonTaskResult(ret);
 
-        ret = content(new VisorGatewayArgument(VisorIgfsSamplingStateTask.class)
-            .forNode(locNode)
-            .argument(VisorIgfsSamplingStateTaskArg.class, "igfs", false));
-
-        info("VisorIgfsSamplingStateTask result: " + ret);
-
-        jsonTaskResult(ret);
-
-        ret = content(new VisorGatewayArgument(VisorIgfsProfilerClearTask.class)
-            .forNode(locNode)
-            .argument(VisorIgfsProfilerClearTaskArg.class, "igfs"));
-
-        info("VisorIgfsProfilerClearTask result: " + ret);
-
-        jsonTaskResult(ret);
-
-        ret = content(new VisorGatewayArgument(VisorIgfsProfilerTask.class)
-            .forNode(locNode)
-            .argument(VisorIgfsProfilerTaskArg.class, "igfs"));
-
-        info("VisorIgfsProfilerTask result: " + ret);
-
-        jsonTaskResult(ret);
-
-        ret = content(new VisorGatewayArgument(VisorIgfsFormatTask.class)
-            .forNode(locNode)
-            .argument(VisorIgfsFormatTaskArg.class, "igfs"));
-
-        info("VisorIgfsFormatTask result: " + ret);
-
-        jsonTaskResult(ret);
-
-        ret = content(new VisorGatewayArgument(VisorIgfsResetMetricsTask.class)
-            .forNode(locNode)
-            .argument(VisorIgfsResetMetricsTaskArg.class)
-            .set(String.class, "igfs"));
-
-        info("VisorIgfsResetMetricsTask result: " + ret);
-
-        jsonTaskResult(ret);
-
         ret = content(new VisorGatewayArgument(VisorThreadDumpTask.class)
             .forNode(locNode));
 
@@ -4021,13 +3968,6 @@ public abstract class JettyRestProcessorAbstractSelfTest extends JettyRestProces
     @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
 
-        FileSystemConfiguration igfs = new FileSystemConfiguration();
-
-        igfs.setName("igfs");
-        igfs.setIpcEndpointConfiguration(new IgfsIpcEndpointConfiguration());
-
-        cfg.setFileSystemConfiguration(igfs);
-
         DataStorageConfiguration dsCfg = new DataStorageConfiguration();
 
         DataRegionConfiguration drCfg = new DataRegionConfiguration();
diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java
index 98f9937..7c2d942 100644
--- a/modules/core/src/main/java/org/apache/ignite/Ignite.java
+++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java
@@ -66,7 +66,6 @@ import org.jetbrains.annotations.Nullable;
  * <li>{@link IgniteQueue} - distributed blocking queue.</li>
  * <li>{@link IgniteSet} - distributed concurrent set.</li>
  * <li>{@link IgniteScheduler} - functionality for scheduling jobs using UNIX Cron syntax.</li>
- * <li>{@link IgniteFileSystem} - functionality for distributed Hadoop-compliant in-memory file system and map-reduce.</li>
  * </ul>
  */
 public interface Ignite extends AutoCloseable {
@@ -412,27 +411,6 @@ public interface Ignite extends AutoCloseable {
     public <K, V> IgniteDataStreamer<K, V> dataStreamer(String cacheName) throws IllegalStateException;
 
     /**
-     * Gets an instance of IGFS (Ignite In-Memory File System). If one is not
-     * configured then {@link IllegalArgumentException} will be thrown.
-     * <p>
-     * IGFS is fully compliant with Hadoop {@code FileSystem} APIs and can
-     * be plugged into Hadoop installations. For more information refer to
-     * documentation on Hadoop integration shipped with Ignite.
-     *
-     * @param name IGFS name.
-     * @return IGFS instance.
-     * @throws IllegalArgumentException If IGFS with such name is not configured.
-     */
-    public IgniteFileSystem fileSystem(String name) throws IllegalArgumentException;
-
-    /**
-     * Gets all instances of IGFS (Ignite In-Memory File System).
-     *
-     * @return Collection of IGFS instances.
-     */
-    public Collection<IgniteFileSystem> fileSystems();
-
-    /**
      * Will get an atomic sequence from cache and create one if it has not been created yet and {@code create} flag
      * is {@code true}. It will use configuration from {@link IgniteConfiguration#getAtomicConfiguration()}.
      *
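
For reference, the two methods removed from Ignite.java above were the entry point for obtaining an
IGFS instance prior to 2.9. A minimal usage sketch, condensed from the removed javadoc and the removed
examples (the configuration path and the instance name "igfs" are taken from those examples; the class
name is illustrative):

    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.Ignition;

    /** Illustrative pre-2.9 lookup of a configured IGFS instance. */
    public class IgfsLookupSketch {
        public static void main(String[] args) throws Exception {
            try (Ignite ignite = Ignition.start("examples/config/filesystem/example-igfs.xml")) {
                // Threw IllegalArgumentException if no IGFS with this name was configured.
                IgniteFileSystem fs = ignite.fileSystem("igfs");

                System.out.println("Configured IGFS instances: " + ignite.fileSystems().size());
                System.out.println("Using IGFS: " + fs.name());
            }
            catch (IllegalArgumentException e) {
                System.out.println("No IGFS named 'igfs' is configured: " + e.getMessage());
            }
        }
    }
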
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java b/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
deleted file mode 100644
index 3e3aaa7..0000000
--- a/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite;
-
-import java.util.Collection;
-import java.util.Map;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.igfs.IgfsMetrics;
-import org.apache.ignite.igfs.IgfsMode;
-import org.apache.ignite.igfs.IgfsOutputStream;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathNotFoundException;
-import org.apache.ignite.igfs.IgfsPathSummary;
-import org.apache.ignite.igfs.mapreduce.IgfsRecordResolver;
-import org.apache.ignite.igfs.mapreduce.IgfsTask;
-import org.apache.ignite.lang.IgniteAsyncSupport;
-import org.apache.ignite.lang.IgniteFuture;
-import org.apache.ignite.lang.IgniteUuid;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * <b>IG</b>nite <b>F</b>ile <b>S</b>ystem API. It provides a typical file system "view" on a particular cache:
- * <ul>
- *     <li>list directories or get information for a single path</li>
- *     <li>create/move/delete files or directories</li>
- *     <li>write/read data streams into/from files</li>
- * </ul>
- * The data of each file is split into separate data blocks and stored in the cache.
- * You can access a file's data with a standard Java streaming API. Moreover, for each part
- * of the file you can calculate an affinity and process the file's content on the corresponding
- * nodes to avoid unnecessary networking.
- * <p/>
- * This API is fully thread-safe and you can use it from several threads.
- * <h1 class="header">IGFS Configuration</h1>
- * The simplest way to run an Ignite node with a configured file system is to pass
- * special configuration file included in Ignite distribution to {@code ignite.sh} or
- * {@code ignite.bat} scripts, like this: {@code ignite.sh config/hadoop/default-config.xml}
- * <p>
- * {@code IGFS} can be started as a data node or as a client node. A data node is responsible for
- * caching data, while a client node is responsible for basic file system operations and accessing
- * data nodes remotely. When used as a Hadoop file system, client nodes are usually started together
- * with {@code job-submitter} or {@code job-scheduler} processes, while data nodes are usually
- * started together with Hadoop {@code task-tracker} processes.
- * <h1 class="header">Integration With Hadoop</h1>
- * In addition to direct file system API, {@code IGFS} can be integrated with {@code Hadoop} by
- * plugging in as {@code Hadoop FileSystem}. Refer to
- * {@code org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem} or
- * {@code org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem} for more information.
- * <p>
- * <b>NOTE:</b> integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
- */
-public interface IgniteFileSystem extends IgniteAsyncSupport {
-    /** IGFS scheme name. */
-    public static final String IGFS_SCHEME = "igfs";
-
-    /**
-     * Gets IGFS name.
-     *
-     * @return IGFS name.
-     */
-    public String name();
-
-    /**
-     * Gets IGFS configuration.
-     *
-     * @return IGFS configuration.
-     */
-    public FileSystemConfiguration configuration();
-
-    /**
-     * Gets summary (total number of files, total number of directories and total length)
-     * for a given path.
-     *
-     * @param path Path to get information for.
-     * @return Summary object.
-     * @throws IgfsPathNotFoundException If path is not found.
-     * @throws IgniteException If failed.
-     */
-    public IgfsPathSummary summary(IgfsPath path) throws IgniteException;
-
-    /**
-     * Opens a file for reading.
-     *
-     * @param path File path to read.
-     * @return File input stream to read data from.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public IgfsInputStream open(IgfsPath path) throws IgniteException;
-
-    /**
-     * Opens a file for reading.
-     *
-     * @param path File path to read.
-     * @param bufSize Read buffer size (bytes) or {@code zero} to use default value.
-     * @return File input stream to read data from.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public IgfsInputStream open(IgfsPath path, int bufSize) throws IgniteException;
-
-    /**
-     * Opens a file for reading.
-     *
-     * @param path File path to read.
-     * @param bufSize Read buffer size (bytes) or {@code zero} to use default value.
-     * @param seqReadsBeforePrefetch Amount of sequential reads before prefetch is started.
-     * @return File input stream to read data from.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public IgfsInputStream open(IgfsPath path, int bufSize, int seqReadsBeforePrefetch) throws IgniteException;
-
-    /**
-     * Creates a file and opens it for writing.
-     *
-     * @param path File path to create.
-     * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existent directory.
-     * @return File output stream to write data to.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsOutputStream create(IgfsPath path, boolean overwrite) throws IgniteException;
-
-    /**
-     * Creates a file and opens it for writing.
-     *
-     * @param path File path to create.
-     * @param bufSize Write buffer size (bytes) or {@code zero} to use default value.
-     * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existent directory.
-     * @param replication Replication factor.
-     * @param blockSize Block size.
-     * @param props File properties to set.
-     * @return File output stream to write data to.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsOutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication,
-        long blockSize, @Nullable Map<String, String> props) throws IgniteException;
-
-    /**
-     * Creates a file and opens it for writing.
-     *
-     * @param path File path to create.
-     * @param bufSize Write buffer size (bytes) or {@code zero} to use default value.
-     * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existent directory.
-     * @param affKey Affinity key used to store file blocks. If not {@code null}, the whole file will be
-     *      stored on node where {@code affKey} resides.
-     * @param replication Replication factor.
-     * @param blockSize Block size.
-     * @param props File properties to set.
-     * @return File output stream to write data to.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsOutputStream create(IgfsPath path, int bufSize, boolean overwrite,
-        @Nullable IgniteUuid affKey, int replication, long blockSize, @Nullable Map<String, String> props)
-        throws IgniteException;
-
-    /**
-     * Opens an output stream to an existing file for appending data.
-     *
-     * @param path File path to append.
-     * @param create Create file if it doesn't exist yet.
-     * @return File output stream to append data to.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist and create flag is {@code false}.
-     */
-    public IgfsOutputStream append(IgfsPath path, boolean create) throws IgniteException;
-
-    /**
-     * Opens an output stream to an existing file for appending data.
-     *
-     * @param path File path to append.
-     * @param bufSize Write buffer size (bytes) or {@code zero} to use default value.
-     * @param create Create file if it doesn't exist yet.
-     * @param props File properties to set only if the file was just created.
-     * @return File output stream to append data to.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist and create flag is {@code false}.
-     */
-    public IgfsOutputStream append(IgfsPath path, int bufSize, boolean create, @Nullable Map<String, String> props)
-        throws IgniteException;
-
-    /**
-     * Sets last access time and last modification time for a given path. If an argument is {@code -1},
-     * the corresponding time will not be changed.
-     *
-     * @param path Path to update.
-     * @param modificationTime Optional last modification time to set. Value {@code -1} does not update
-     *      modification time.
-     * @param accessTime Optional last access time to set. Value {@code -1} does not update access time.
-     * @throws IgfsPathNotFoundException If target was not found.
-     * @throws IgniteException If error occurred.
-     */
-    public void setTimes(IgfsPath path, long modificationTime, long accessTime) throws IgniteException;
-
-    /**
-     * Gets affinity block locations for data blocks of the file, i.e. the nodes, on which the blocks
-     * are stored.
-     *
-     * @param path File path to get affinity for.
-     * @param start Position in the file to start affinity resolution from.
-     * @param len Size of data in the file to resolve affinity for.
-     * @return Affinity block locations.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len) throws IgniteException;
-
-    /**
-     * Gets affinity block locations for data blocks of the file. If the {@code maxLen} parameter is set and
-     * a particular block location length is greater than this value, block locations will be split into smaller
-     * chunks.
-     *
-     * @param path File path to get affinity for.
-     * @param start Position in the file to start affinity resolution from.
-     * @param len Size of data in the file to resolve affinity for.
-     * @param maxLen Maximum length of a single returned block location.
-     * @return Affinity block locations.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len, long maxLen)
-        throws IgniteException;
-
-    /**
-     * Gets metrics snapshot for this file system.
-     *
-     * @return Metrics.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsMetrics metrics() throws IgniteException;
-
-    /**
-     * Resets metrics for this file system.
-     *
-     * @throws IgniteException In case of error.
-     */
-    public void resetMetrics() throws IgniteException;
-
-    /**
-     * Determines the size of the file denoted by the provided path. If the path is a directory, then
-     * the total size of all contained entries will be calculated recursively.
-     *
-     * @param path File system path.
-     * @return Total size.
-     * @throws IgniteException In case of error.
-     */
-    public long size(IgfsPath path) throws IgniteException;
-
-    /**
-     * Formats the file system removing all existing entries from it, but not removing anything in secondary
-     * file system (if any).
-     *
-     * @throws IgniteException In case clear failed.
-     */
-    public void clear() throws IgniteException;
-
-    /**
-     * Formats the file system removing all existing entries from it, but not removing anything in secondary
-     * file system (if any).
-     *
-     * @return Future representing pending completion of the clear operation.
-     */
-    public IgniteFuture<Void> clearAsync() throws IgniteException;
-
-    /**
-     * Executes IGFS task.
-     *
-     * @param task Task to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param arg Optional task argument.
-     * @return Task result.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> R execute(IgfsTask<T, R> task, @Nullable IgfsRecordResolver rslvr,
-        Collection<IgfsPath> paths, @Nullable T arg) throws IgniteException;
-
-    /**
-     * Executes IGFS task asynchronously.
-     *
-     * @param task Task to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param arg Optional task argument.
-     * @return a Future representing pending completion of the task.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> IgniteFuture<R> executeAsync(IgfsTask<T, R> task, @Nullable IgfsRecordResolver rslvr,
-        Collection<IgfsPath> paths, @Nullable T arg) throws IgniteException;
-
-    /**
-     * Executes IGFS task with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
-     * <p>
-     * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
-     *
-     * @param task Task to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param skipNonExistentFiles Whether to skip non-existent files. If set to {@code true}, non-existent files will
-     *     be ignored. Otherwise an exception will be thrown.
-     * @param maxRangeLen Optional maximum range length. If {@code 0}, then by default all consecutive
-     *      IGFS blocks will be included.
-     * @param arg Optional task argument.
-     * @return Task result.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> R execute(IgfsTask<T, R> task, @Nullable IgfsRecordResolver rslvr,
-        Collection<IgfsPath> paths, boolean skipNonExistentFiles, long maxRangeLen, @Nullable T arg)
-        throws IgniteException;
-
-    /**
-     * Executes IGFS task asynchronously with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
-     *
-     * @param task Task to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param skipNonExistentFiles Whether to skip non-existent files. If set to {@code true}, non-existent files will
-     *     be ignored. Otherwise an exception will be thrown.
-     * @param maxRangeLen Optional maximum range length. If {@code 0}, then by default all consecutive
-     *      IGFS blocks will be included.
-     * @param arg Optional task argument.
-     * @return a Future representing pending completion of the task.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> IgniteFuture<R> executeAsync(IgfsTask<T, R> task, @Nullable IgfsRecordResolver rslvr,
-        Collection<IgfsPath> paths, boolean skipNonExistentFiles, long maxRangeLen, @Nullable T arg)
-        throws IgniteException;
-
-    /**
-     * Executes IGFS task.
-     * <p>
-     * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
-     *
-     * @param taskCls Task class to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param arg Optional task argument.
-     * @return Task result.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> R execute(Class<? extends IgfsTask<T, R>> taskCls,
-        @Nullable IgfsRecordResolver rslvr, Collection<IgfsPath> paths, @Nullable T arg) throws IgniteException;
-
-    /**
-     * Executes IGFS task asynchronously.
-     *
-     * @param taskCls Task class to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param arg Optional task argument.
-     * @return a Future representing pending completion of the task.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> IgniteFuture<R> executeAsync(Class<? extends IgfsTask<T, R>> taskCls,
-        @Nullable IgfsRecordResolver rslvr, Collection<IgfsPath> paths, @Nullable T arg) throws IgniteException;
-
-    /**
-     * Executes IGFS task with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
-     * <p>
-     * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
-     *
-     * @param taskCls Task class to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param skipNonExistentFiles Whether to skip non-existent files. If set to {@code true}, non-existent files will
-     *     be ignored. Otherwise an exception will be thrown.
-     * @param maxRangeLen Maximum range length.
-     * @param arg Optional task argument.
-     * @return Task result.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> R execute(Class<? extends IgfsTask<T, R>> taskCls,
-        @Nullable IgfsRecordResolver rslvr, Collection<IgfsPath> paths, boolean skipNonExistentFiles,
-        long maxRangeLen, @Nullable T arg) throws IgniteException;
-
-    /**
-     * Executes IGFS task asynchronously with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
-     *
-     * @param taskCls Task class to execute.
-     * @param rslvr Optional resolver to control split boundaries.
-     * @param paths Collection of paths to be processed within this task.
-     * @param skipNonExistentFiles Whether to skip non-existent files. If set to {@code true}, non-existent files will
-     *     be ignored. Otherwise an exception will be thrown.
-     * @param maxRangeLen Maximum range length.
-     * @param arg Optional task argument.
-     * @return a Future representing pending completion of the task.
-     * @throws IgniteException If execution failed.
-     */
-    public <T, R> IgniteFuture<R> executeAsync(Class<? extends IgfsTask<T, R>> taskCls,
-        @Nullable IgfsRecordResolver rslvr, Collection<IgfsPath> paths, boolean skipNonExistentFiles,
-        long maxRangeLen, @Nullable T arg) throws IgniteException;
-
-    /**
-     * Checks if the specified path exists in the file system.
-     *
-     * @param path Path to check for existence in the file system.
-     * @return {@code True} if such file exists, otherwise - {@code false}.
-     * @throws IgniteException In case of error.
-     */
-    public boolean exists(IgfsPath path);
-
-    /**
-     * Updates file information for the specified path. Existing properties not listed in the passed collection
-     * will not be affected. Other properties will be added or overwritten. Passed properties with {@code null} values
-     * will be removed from the stored properties or ignored if they don't exist in the file info.
-     * <p>
-     * When working in {@code DUAL_SYNC} or {@code DUAL_ASYNC} modes with a Hadoop secondary file system, only the following properties will be updated:
-     * <ul>
-     * <li>{@code usrName} - file owner name;</li>
-     * <li>{@code grpName} - file owner group;</li>
-     * <li>{@code permission} - Unix-style string representing file permissions.</li>
-     * </ul>
-     *
-     * @param path File path to set properties for.
-     * @param props Properties to update.
-     * @return File information for specified path or {@code null} if such path does not exist.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteException;
-
-    /**
-     * Renames/moves a file.
-     * <p>
-     * You are free to rename/move data files as you wish, but directories can only be renamed.
-     * You cannot move a directory between different parent directories.
-     * <p>
-     * Examples:
-     * <ul>
-     *     <li>"/work/file.txt" => "/home/project/Presentation Scenario.txt"</li>
-     *     <li>"/work" => "/work-2012.bkp"</li>
-     *     <li>"/work" => "<strike>/backups/work</strike>" - such operation is restricted for directories.</li>
-     * </ul>
-     *
-     * @param src Source file path to rename.
-     * @param dest Destination file path. If destination path is a directory, then source file will be placed
-     *     into destination directory with original name.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If source file doesn't exist.
-     */
-    public void rename(IgfsPath src, IgfsPath dest) throws IgniteException;
-
-    /**
-     * Deletes file.
-     *
-     * @param path File path to delete.
-     * @param recursive Delete non-empty directories recursively.
-     * @return {@code True} in case of success, {@code false} otherwise.
-     * @throws IgniteException In case of error.
-     */
-    public boolean delete(IgfsPath path, boolean recursive) throws IgniteException;
-
-    /**
-     * Creates directories under specified path.
-     *
-     * @param path Path of directories chain to create.
-     * @throws IgniteException In case of error.
-     */
-    public void mkdirs(IgfsPath path) throws IgniteException;
-
-    /**
-     * Creates directories under specified path with the specified properties.
-     * Note that the properties are applied only to created directories, but never
-     * updated for existing ones.
-     *
-     * @param path Path of directories chain to create.
-     * @param props Metadata properties to set on created directories.
-     * @throws IgniteException In case of error.
-     */
-    public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) throws IgniteException;
-
-    /**
-     * Lists file paths under the specified path.
-     *
-     * @param path Path to list files under.
-     * @return List of paths under the specified path.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteException;
-
-    /**
-     * Lists files under the specified path.
-     *
-     * @param path Path to list files under.
-     * @return List of files under the specified path.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteException;
-
-    /**
-     * Gets file information for the specified path.
-     *
-     * @param path Path to get information for.
-     * @return File information for specified path or {@code null} if such path does not exist.
-     * @throws IgniteException In case of error.
-     */
-    @Nullable public IgfsFile info(IgfsPath path) throws IgniteException;
-
-    /**
-     * Get mode for the given path.
-     *
-     * @param path Path.
-     * @return Mode used for this path.
-     */
-    public IgfsMode mode(IgfsPath path);
-
-    /**
-     * Gets used space in bytes.
-     *
-     * @return Used space in bytes.
-     * @throws IgniteException In case of error.
-     */
-    public long usedSpaceSize() throws IgniteException;
-
-    /** {@inheritDoc} */
-    @Deprecated
-    @Override public IgniteFileSystem withAsync();
-}
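
For reference, the interface removed above was the public entry point to IGFS. Below is a minimal usage sketch against that API, assuming an older Ignite release with an IGFS instance named "igfs" defined in a hypothetical igfs-config.xml (both names are placeholders); it exercises only methods visible in the deleted interface.

    // Illustrative sketch only: the IgniteFileSystem API shown in the diff above is
    // removed by this commit. The instance name "igfs" and the config file name are
    // assumptions; any IGFS-enabled configuration from an older release would do.
    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.igfs.IgfsOutputStream;
    import org.apache.ignite.igfs.IgfsPath;

    public class IgfsUsageSketch {
        public static void main(String[] args) throws Exception {
            try (Ignite ignite = Ignition.start("igfs-config.xml")) {
                IgniteFileSystem fs = ignite.fileSystem("igfs");

                IgfsPath dir = new IgfsPath("/sketch");
                IgfsPath file = new IgfsPath(dir, "data.txt");

                // Create the directory chain, then write a file.
                fs.mkdirs(dir);

                // append(path, true) creates the file if it does not exist yet.
                try (IgfsOutputStream out = fs.append(file, true)) {
                    out.write("hello igfs".getBytes());
                }

                System.out.println("exists: " + fs.exists(file));
                System.out.println("size:   " + fs.size(file));

                fs.delete(dir, true);
            }
        }
    }
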
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsEvictionFilter.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsEvictionFilter.java
deleted file mode 100644
index 9de9d50..0000000
--- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsEvictionFilter.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.eviction.igfs;
-
-import javax.cache.Cache;
-import org.apache.ignite.cache.eviction.EvictionFilter;
-import org.apache.ignite.internal.processors.igfs.IgfsBlockKey;
-
-/**
- * IGFS eviction filter which will not evict blocks of particular files.
- */
-public class IgfsEvictionFilter implements EvictionFilter {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public boolean evictAllowed(Cache.Entry entry) {
-        Object key = entry.getKey();
-
-        return !(key instanceof IgfsBlockKey && ((IgfsBlockKey)key).evictExclude());
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsPerBlockLruEvictionPolicy.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsPerBlockLruEvictionPolicy.java
deleted file mode 100644
index ad26ae4..0000000
--- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsPerBlockLruEvictionPolicy.java
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.eviction.igfs;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.LongAdder;
-import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.cache.eviction.EvictableEntry;
-import org.apache.ignite.cache.eviction.EvictionPolicy;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.processors.cache.CacheEvictableEntryImpl;
-import org.apache.ignite.internal.processors.igfs.IgfsBlockKey;
-import org.apache.ignite.mxbean.IgniteMBeanAware;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ConcurrentLinkedDeque8;
-import org.jsr166.ConcurrentLinkedDeque8.Node;
-
-/**
- * IGFS eviction policy which evicts particular blocks.
- */
-public class IgfsPerBlockLruEvictionPolicy implements EvictionPolicy<IgfsBlockKey, byte[]>, IgniteMBeanAware,
-    Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Maximum size. When reached, eviction begins. */
-    private volatile long maxSize;
-
-    /** Maximum amount of blocks. When reached, eviction begins. */
-    private volatile int maxBlocks;
-
-    /** Collection of regex for paths which must not be evicted. */
-    private volatile Collection<String> excludePaths;
-
-    /** Exclusion patterns. */
-    private volatile Collection<Pattern> excludePatterns;
-
-    /** Whether patterns must be recompiled during the next call. */
-    private final AtomicBoolean excludeRecompile = new AtomicBoolean(true);
-
-    /** Queue. */
-    private final ConcurrentLinkedDeque8<EvictableEntry<IgfsBlockKey, byte[]>> queue =
-        new ConcurrentLinkedDeque8<>();
-
-    /** Current size of all enqueued blocks in bytes. */
-    private final LongAdder curSize = new LongAdder();
-
-    /**
-     * Default constructor.
-     */
-    public IgfsPerBlockLruEvictionPolicy() {
-        // No-op.
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param maxSize Maximum size. When reached, eviction begins.
-     * @param maxBlocks Maximum amount of blocks. When reached, eviction begins.
-     */
-    public IgfsPerBlockLruEvictionPolicy(long maxSize, int maxBlocks) {
-        this(maxSize, maxBlocks, null);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param maxSize Maximum size. When reached, eviction begins.
-     * @param maxBlocks Maximum amount of blocks. When reached, eviction begins.
-     * @param excludePaths Collection of regex for path which must not be evicted.
-     */
-    public IgfsPerBlockLruEvictionPolicy(long maxSize, int maxBlocks, @Nullable Collection<String> excludePaths) {
-        this.maxSize = maxSize;
-        this.maxBlocks = maxBlocks;
-        this.excludePaths = excludePaths;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onEntryAccessed(boolean rmv, EvictableEntry<IgfsBlockKey, byte[]> entry) {
-        if (!rmv) {
-            if (!entry.isCached())
-                return;
-
-            if (touch(entry))
-                shrink();
-        }
-        else {
-            MetaEntry meta = entry.removeMeta();
-
-            if (meta != null && queue.unlinkx(meta.node()))
-                changeSize(-meta.size());
-        }
-    }
-
-    /**
-     * @param entry Entry to touch.
-     * @return {@code True} if new node has been added to queue by this call.
-     */
-    private boolean touch(EvictableEntry<IgfsBlockKey, byte[]> entry) {
-        byte[] val = peek(entry);
-
-        int blockSize = val != null ? val.length : 0;
-
-        MetaEntry meta = entry.meta();
-
-        // Entry has not been enqueued yet.
-        if (meta == null) {
-            while (true) {
-                Node<EvictableEntry<IgfsBlockKey, byte[]>> node = queue.offerLastx(entry);
-
-                meta = new MetaEntry(node, blockSize);
-
-                if (entry.putMetaIfAbsent(meta) != null) {
-                    // Was concurrently added, need to clear it from queue.
-                    queue.unlinkx(node);
-
-                    // Queue has not been changed.
-                    return false;
-                }
-                else if (node.item() != null) {
-                    if (!entry.isCached()) {
-                        // Was concurrently evicted, need to clear it from queue.
-                        queue.unlinkx(node);
-
-                        return false;
-                    }
-
-                    // Increment current size.
-                    changeSize(blockSize);
-
-                    return true;
-                }
-                // If node was unlinked by concurrent shrink() call, we must repeat the whole cycle.
-                else if (!entry.removeMeta(node))
-                    return false;
-            }
-        }
-        else {
-            int oldBlockSize = meta.size();
-
-            Node<EvictableEntry<IgfsBlockKey, byte[]>> node = meta.node();
-
-            if (queue.unlinkx(node)) {
-                // Move node to tail.
-                Node<EvictableEntry<IgfsBlockKey, byte[]>> newNode = queue.offerLastx(entry);
-
-                int delta = blockSize - oldBlockSize;
-
-                if (!entry.replaceMeta(meta, new MetaEntry(newNode, blockSize))) {
-                    // Was concurrently added, need to clear it from queue.
-                    if (queue.unlinkx(newNode))
-                        delta -= blockSize;
-                }
-
-                if (delta != 0) {
-                    changeSize(delta);
-
-                    if (delta > 0)
-                        // Total size increased, so shrinking could be needed.
-                        return true;
-                }
-            }
-        }
-
-        // Entry is already in queue.
-        return false;
-    }
-
-    /**
-     * @param entry Entry.
-     * @return Peeked value.
-     */
-    @Nullable private byte[] peek(EvictableEntry<IgfsBlockKey, byte[]> entry) {
-        return (byte[])((CacheEvictableEntryImpl)entry).peek();
-    }
-
-    /**
-     * Shrinks queue to maximum allowed size.
-     */
-    private void shrink() {
-        long maxSize = this.maxSize;
-        int maxBlocks = this.maxBlocks;
-
-        int cnt = queue.sizex();
-
-        for (int i = 0; i < cnt && (maxBlocks > 0 && queue.sizex() > maxBlocks ||
-            maxSize > 0 && curSize.longValue() > maxSize); i++) {
-            EvictableEntry<IgfsBlockKey, byte[]> entry = queue.poll();
-
-            if (entry == null)
-                break; // Queue is empty.
-
-            byte[] val = peek(entry);
-
-            if (val != null)
-                changeSize(-val.length); // Change current size as we polled entry from the queue.
-
-            if (!entry.evict()) {
-                // Reorder entries which we failed to evict.
-                entry.removeMeta();
-
-                touch(entry);
-            }
-        }
-    }
-
-    /**
-     * Change current size.
-     *
-     * @param delta Delta in bytes.
-     */
-    private void changeSize(int delta) {
-        if (delta != 0)
-            curSize.add(delta);
-    }
-
-    /**
-     * Gets maximum allowed size of all blocks in bytes.
-     *
-     * @return Maximum allowed size of all blocks in bytes.
-     */
-    public long getMaxSize() {
-        return maxSize;
-    }
-
-    /**
-     * Sets maximum allowed size of data in all blocks in bytes.
-     *
-     * @param maxSize Maximum allowed size of data in all blocks in bytes.
-     *
-     * @return {@code this} for chaining.
-     */
-    public IgfsPerBlockLruEvictionPolicy setMaxSize(long maxSize) {
-        this.maxSize = maxSize;
-
-        return this;
-    }
-
-    /**
-     * Gets maximum allowed amount of blocks.
-     *
-     * @return Maximum allowed amount of blocks.
-     */
-    public int getMaxBlocks() {
-        return maxBlocks;
-    }
-
-    /**
-     * Sets maximum allowed amount of blocks.
-     *
-     * @param maxBlocks Maximum allowed amount of blocks.
-     *
-     * @return {@code this} for chaining.
-     */
-    public IgfsPerBlockLruEvictionPolicy setMaxBlocks(int maxBlocks) {
-        this.maxBlocks = maxBlocks;
-
-        return this;
-    }
-
-    /**
-     * Gets collection of regex for paths whose blocks must not be evicted.
-     *
-     * @return Collection of regex for paths whose blocks must not be evicted.
-     */
-    public Collection<String> getExcludePaths() {
-        return Collections.unmodifiableCollection(excludePaths);
-    }
-
-    /**
-     * Sets collection of regex for paths whose blocks must not be evicted.
-     *
-     * @param excludePaths Collection of regex for paths whose blocks must not be evicted.
-     *
-     * @return {@code this} for chaining.
-     */
-    public IgfsPerBlockLruEvictionPolicy setExcludePaths(@Nullable Collection<String> excludePaths) {
-        this.excludePaths = excludePaths;
-
-        excludeRecompile.set(true);
-
-        return this;
-    }
-
-    /**
-     * Gets current size of data in all blocks.
-     *
-     * @return Current size of data in all blocks.
-     */
-    public long getCurrentSize() {
-        return curSize.longValue();
-    }
-
-    /**
-     * Gets current amount of blocks.
-     *
-     * @return Current amount of blocks.
-     */
-    public int getCurrentBlocks() {
-        return queue.size();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object getMBean() {
-        return new IgfsPerBlockLruEvictionPolicyMXBeanImpl();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeLong(maxSize);
-        out.writeInt(maxBlocks);
-        out.writeObject(excludePaths);
-        out.writeObject(excludePatterns);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        maxSize = in.readLong();
-        maxBlocks = in.readInt();
-        excludePaths = (Collection<String>)in.readObject();
-        excludePatterns = (Collection<Pattern>)in.readObject();
-    }
-
-    /**
-     * Check whether provided path must be excluded from evictions.
-     *
-     * @param path Path.
-     * @return {@code True} if blocks of the file under the given path must be excluded from eviction.
-     * @throws IgniteCheckedException In case of faulty patterns.
-     */
-    public boolean exclude(IgfsPath path) throws IgniteCheckedException {
-        assert path != null;
-
-        Collection<Pattern> excludePatterns0;
-
-        if (excludeRecompile.compareAndSet(true, false)) {
-            // Recompile.
-            Collection<String> excludePaths0 = excludePaths;
-
-            if (excludePaths0 != null) {
-                excludePatterns0 = new HashSet<>(excludePaths0.size(), 1.0f);
-
-                for (String excludePath : excludePaths0) {
-                    try {
-                        excludePatterns0.add(Pattern.compile(excludePath));
-                    }
-                    catch (PatternSyntaxException ignore) {
-                        throw new IgniteCheckedException("Invalid regex pattern: " + excludePath);
-                    }
-                }
-
-                excludePatterns = excludePatterns0;
-            }
-            else
-                excludePatterns0 = excludePatterns = null;
-        }
-        else
-            excludePatterns0 = excludePatterns;
-
-        if (excludePatterns0 != null) {
-            String pathStr = path.toString();
-
-            for (Pattern pattern : excludePatterns0) {
-                if (pattern.matcher(pathStr).matches())
-                    return true;
-            }
-        }
-
-        return false;
-    }
-
-    /**
-     * Meta entry.
-     */
-    private static class MetaEntry {
-        /** Queue node. */
-        private final Node<EvictableEntry<IgfsBlockKey, byte[]>> node;
-
-        /** Data size. */
-        private final int size;
-
-        /**
-         * Constructor.
-         *
-         * @param node Queue node.
-         * @param size Data size.
-         */
-        private MetaEntry(Node<EvictableEntry<IgfsBlockKey, byte[]>> node, int size) {
-            assert node != null;
-            assert size >= 0;
-
-            this.node = node;
-            this.size = size;
-        }
-
-        /**
-         * @return Queue node.
-         */
-        private Node<EvictableEntry<IgfsBlockKey, byte[]>> node() {
-            return node;
-        }
-
-        /**
-         * @return Data size.
-         */
-        private int size() {
-            return size;
-        }
-    }
-
-    /**
-     * MBean implementation for IgfsPerBlockLruEvictionPolicy.
-     */
-    private class IgfsPerBlockLruEvictionPolicyMXBeanImpl implements IgfsPerBlockLruEvictionPolicyMXBean {
-        /** {@inheritDoc} */
-        @Override public long getMaxSize() {
-            return IgfsPerBlockLruEvictionPolicy.this.getMaxSize();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void setMaxSize(long maxSize) {
-            IgfsPerBlockLruEvictionPolicy.this.setMaxSize(maxSize);
-        }
-
-        /** {@inheritDoc} */
-        @Override public int getMaxBlocks() {
-            return IgfsPerBlockLruEvictionPolicy.this.getMaxBlocks();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void setMaxBlocks(int maxBlocks) {
-            IgfsPerBlockLruEvictionPolicy.this.setMaxBlocks(maxBlocks);
-        }
-
-        /** {@inheritDoc} */
-        @Nullable @Override public Collection<String> getExcludePaths() {
-            return IgfsPerBlockLruEvictionPolicy.this.getExcludePaths();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void setExcludePaths(@Nullable Collection<String> excludePaths) {
-            IgfsPerBlockLruEvictionPolicy.this.setExcludePaths(excludePaths);
-        }
-
-        /** {@inheritDoc} */
-        @Override public long getCurrentSize() {
-            return IgfsPerBlockLruEvictionPolicy.this.getCurrentSize();
-        }
-
-        /** {@inheritDoc} */
-        @Override public int getCurrentBlocks() {
-            return IgfsPerBlockLruEvictionPolicy.this.getCurrentBlocks();
-        }
-    }
-}
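
For reference, the policy removed above was attached to the IGFS data cache via CacheConfiguration. A minimal configuration sketch follows; the cache name, size limits and exclude pattern are illustrative assumptions, not values taken from this commit.

    // Illustrative sketch only: IgfsPerBlockLruEvictionPolicy and IgfsEvictionFilter are
    // removed by this commit. Cache name, limits and the exclude pattern are assumptions.
    import java.util.Arrays;
    import org.apache.ignite.cache.eviction.igfs.IgfsEvictionFilter;
    import org.apache.ignite.cache.eviction.igfs.IgfsPerBlockLruEvictionPolicy;
    import org.apache.ignite.configuration.CacheConfiguration;

    public class IgfsEvictionConfigSketch {
        @SuppressWarnings("unchecked")
        public static CacheConfiguration igfsDataCacheConfiguration() {
            IgfsPerBlockLruEvictionPolicy plc = new IgfsPerBlockLruEvictionPolicy()
                .setMaxSize(512L * 1024 * 1024)                    // Start evicting above 512 MB of cached blocks...
                .setMaxBlocks(100_000)                             // ...or above 100k blocks.
                .setExcludePaths(Arrays.asList("/important/.*"));  // Regex of paths whose blocks are never evicted.

            CacheConfiguration dataCacheCfg = new CacheConfiguration("igfs-data");

            dataCacheCfg.setOnheapCacheEnabled(true);              // Eviction policies act on the on-heap cache.
            dataCacheCfg.setEvictionPolicy(plc);
            dataCacheCfg.setEvictionFilter(new IgfsEvictionFilter());

            return dataCacheCfg;
        }
    }
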
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsPerBlockLruEvictionPolicyMXBean.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsPerBlockLruEvictionPolicyMXBean.java
deleted file mode 100644
index cfdf3e8..0000000
--- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/IgfsPerBlockLruEvictionPolicyMXBean.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.eviction.igfs;
-
-import java.util.Collection;
-import org.apache.ignite.mxbean.MXBeanDescription;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * MBean for {@code IGFS per-block LRU} eviction policy.
- */
-@MXBeanDescription("MBean for IGFS per-block LRU cache eviction policy.")
-public interface IgfsPerBlockLruEvictionPolicyMXBean {
-    /**
-     * Gets maximum allowed size of all blocks in bytes.
-     *
-     * @return Maximum allowed size of all blocks in bytes.
-     */
-    @MXBeanDescription("Maximum allowed size of all blocks in bytes.")
-    public long getMaxSize();
-
-    /**
-     * Sets maximum allowed size of data in all blocks in bytes.
-     *
-     * @param maxSize Maximum allowed size of data in all blocks in bytes.
-     */
-    @MXBeanDescription("Sets maximum allowed size of data in all blocks in bytes.")
-    public void setMaxSize(long maxSize);
-
-    /**
-     * Gets maximum allowed amount of blocks.
-     *
-     * @return Maximum allowed amount of blocks.
-     */
-    @MXBeanDescription("Maximum allowed amount of blocks.")
-    public int getMaxBlocks();
-
-    /**
-     * Sets maximum allowed amount of blocks.
-     *
-     * @param maxBlocks Maximum allowed amount of blocks.
-     */
-    @MXBeanDescription("Sets maximum allowed amount of blocks.")
-    public void setMaxBlocks(int maxBlocks);
-
-    /**
-     * Gets collection of regex for paths whose blocks must not be evicted.
-     *
-     * @return Collection of regex for paths whose blocks must not be evicted.
-     */
-    @MXBeanDescription("Collection of regex for paths whose blocks must not be evicted.")
-    @Nullable public Collection<String> getExcludePaths();
-
-    /**
-     * Sets collection of regex for paths whose blocks must not be evicted.
-     *
-     * @param excludePaths Collection of regex for paths whose blocks must not be evicted.
-     */
-    @MXBeanDescription("Sets collection of regex for paths whose blocks must not be evicted.")
-    public void setExcludePaths(@Nullable Collection<String> excludePaths);
-
-    /**
-     * Gets current size of data in all blocks.
-     *
-     * @return Current size of data in all blocks.
-     */
-    @MXBeanDescription("Current size of data in all blocks.")
-    public long getCurrentSize();
-
-    /**
-     * Gets current amount of blocks.
-     *
-     * @return Current amount of blocks.
-     */
-    @MXBeanDescription("Current amount of blocks.")
-    public int getCurrentBlocks();
-}
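
The interface removed above was the JMX view of that eviction policy. A sketch of reading it through the standard javax.management API follows; the ObjectName used here is a placeholder assumption, since the real name depends on how the node registered the bean.

    // Illustrative sketch only: reads the removed IgfsPerBlockLruEvictionPolicyMXBean over JMX.
    // The ObjectName below is an assumed placeholder, not a guaranteed Ignite MBean name;
    // on a live node, discover the real name via srv.queryNames(...).
    import java.lang.management.ManagementFactory;
    import javax.management.JMX;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import org.apache.ignite.cache.eviction.igfs.IgfsPerBlockLruEvictionPolicyMXBean;

    public class IgfsEvictionMBeanSketch {
        public static void printEvictionStats() throws Exception {
            MBeanServer srv = ManagementFactory.getPlatformMBeanServer();

            // Hypothetical name chosen for the sketch.
            ObjectName name = new ObjectName(
                "org.apache:group=Eviction,name=IgfsPerBlockLruEvictionPolicy");

            IgfsPerBlockLruEvictionPolicyMXBean bean =
                JMX.newMBeanProxy(srv, name, IgfsPerBlockLruEvictionPolicyMXBean.class);

            System.out.println("Blocks cached: " + bean.getCurrentBlocks());
            System.out.println("Bytes cached:  " + bean.getCurrentSize());
        }
    }
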
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/package-info.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/package-info.java
deleted file mode 100644
index edd0ff1..0000000
--- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/igfs/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains IGFS LRU eviction policy implementations.
- */
-
-package org.apache.ignite.cache.eviction.igfs;
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
deleted file mode 100644
index 58197d6..0000000
--- a/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
+++ /dev/null
@@ -1,837 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.configuration;
-
-import java.util.Map;
-import org.apache.ignite.cache.CacheMode;
-import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
-import org.apache.ignite.igfs.IgfsMode;
-import org.apache.ignite.igfs.IgfsOutputStream;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} configuration. More than one file system can be configured within a grid.
- * {@code IGFS} configuration is provided via {@link IgniteConfiguration#getFileSystemConfiguration()}
- * method.
- */
-public class FileSystemConfiguration {
-    /** Default file system user name. */
-    public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
-
-    /** Default fragmentizer throttling block length. */
-    public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16L * 1024 * 1024;
-
-    /** Default fragmentizer throttling delay. */
-    public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
-
-    /** Default fragmentizer concurrent files. */
-    public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
-
-    /** Fragmentizer enabled property. */
-    public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
-
-    /** Default batch size for logging. */
-    public static final int DFLT_IGFS_LOG_BATCH_SIZE = 100;
-
-    /** Default {@code IGFS} log directory. */
-    public static final String DFLT_IGFS_LOG_DIR = "work/igfs/log";
-
-    /** Default per node buffer size. */
-    public static final int DFLT_PER_NODE_BATCH_SIZE = 512;
-
-    /** Default number of per node parallel operations. */
-    public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 16;
-
-    /** Default IGFS mode. */
-    public static final IgfsMode DFLT_MODE = IgfsMode.DUAL_ASYNC;
-
-    /** Default file's data block size (bytes). */
-    public static final int DFLT_BLOCK_SIZE = 1 << 16;
-
-    /** Default read/write buffers size (bytes). */
-    public static final int DFLT_BUF_SIZE = 1 << 16;
-
-    /** Default management port. */
-    public static final int DFLT_MGMT_PORT = 11400;
-
-    /** Default IPC endpoint enabled flag. */
-    public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
-
-    /** Default value of metadata co-location flag. */
-    public static final boolean DFLT_COLOCATE_META = true;
-
-    /** Default value of relaxed consistency flag. */
-    public static final boolean DFLT_RELAXED_CONSISTENCY = true;
-
-    /** Default value of update file length on flush flag. */
-    public static final boolean DFLT_UPDATE_FILE_LEN_ON_FLUSH = false;
-
-    /** IGFS instance name. */
-    private String name;
-
-    /** File's data block size (bytes). */
-    private int blockSize = DFLT_BLOCK_SIZE;
-
-    /** The number of pre-fetched blocks if specific file's chunk is requested. */
-    private int prefetchBlocks;
-
-    /** Amount of sequential block reads before prefetch is triggered. */
-    private int seqReadsBeforePrefetch;
-
-    /** Read/write buffers size for stream operations (bytes). */
-    private int bufSize = DFLT_BUF_SIZE;
-
-    /** Per node buffer size. */
-    private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
-
-    /** Per node parallel operations. */
-    private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
-
-    /** IPC endpoint configuration. */
-    private IgfsIpcEndpointConfiguration ipcEndpointCfg;
-
-    /** IPC endpoint enabled flag. */
-    private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
-
-    /** Management port. */
-    private int mgmtPort = DFLT_MGMT_PORT;
-
-    /** Secondary file system */
-    private IgfsSecondaryFileSystem secondaryFs;
-
-    /** IGFS mode. */
-    private IgfsMode dfltMode = DFLT_MODE;
-
-    /** Fragmentizer throttling block length. */
-    private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
-
-    /** Fragmentizer throttling delay. */
-    private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
-
-    /** Fragmentizer concurrent files. */
-    private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
-
-    /** Fragmentizer enabled flag. */
-    private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
-
-    /** Path modes. */
-    private Map<String, IgfsMode> pathModes;
-
-    /** Maximum range length. */
-    private long maxTaskRangeLen;
-
-    /** Metadata co-location flag. */
-    private boolean colocateMeta = DFLT_COLOCATE_META;
-
-    /** Relaxed consistency flag. */
-    private boolean relaxedConsistency = DFLT_RELAXED_CONSISTENCY;
-
-    /** Update file length on flush flag. */
-    private boolean updateFileLenOnFlush = DFLT_UPDATE_FILE_LEN_ON_FLUSH;
-
-    /** Meta cache config. */
-    private CacheConfiguration metaCacheCfg;
-
-    /** Data cache config. */
-    private CacheConfiguration dataCacheCfg;
-
-    /**
-     * Constructs default configuration.
-     */
-    public FileSystemConfiguration() {
-        // No-op.
-    }
-
-    /**
-     * Constructs the copy of the configuration.
-     *
-     * @param cfg Configuration to copy.
-     */
-    public FileSystemConfiguration(FileSystemConfiguration cfg) {
-        assert cfg != null;
-
-        /*
-         * Must preserve alphabetical order!
-         */
-        blockSize = cfg.getBlockSize();
-        bufSize = cfg.getBufferSize();
-        colocateMeta = cfg.isColocateMetadata();
-        dataCacheCfg = cfg.getDataCacheConfiguration();
-        dfltMode = cfg.getDefaultMode();
-        fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
-        fragmentizerEnabled = cfg.isFragmentizerEnabled();
-        fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
-        fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
-        secondaryFs = cfg.getSecondaryFileSystem();
-        ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
-        ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
-        maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
-        metaCacheCfg = cfg.getMetaCacheConfiguration();
-        mgmtPort = cfg.getManagementPort();
-        name = cfg.getName();
-        pathModes = cfg.getPathModes();
-        perNodeBatchSize = cfg.getPerNodeBatchSize();
-        perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
-        prefetchBlocks = cfg.getPrefetchBlocks();
-        relaxedConsistency = cfg.isRelaxedConsistency();
-        seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
-        updateFileLenOnFlush = cfg.isUpdateFileLengthOnFlush();
-    }
-
-    /**
-     * Gets IGFS instance name.
-     *
-     * @return IGFS instance name.
-     */
-    public String getName() {
-        return name;
-    }
-
-    /**
-     * Sets IGFS instance name.
-     *
-     * @param name IGFS instance name.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setName(String name) {
-        if (name == null)
-            throw new IllegalArgumentException("IGFS name cannot be null");
-
-        this.name = name;
-
-        return this;
-    }
-
-    /**
-     * Cache config to store IGFS meta information.
-     *
-     * @return Cache configuration object.
-     */
-    @Nullable public CacheConfiguration getMetaCacheConfiguration() {
-        return metaCacheCfg;
-    }
-
-    /**
-     * Cache config to store IGFS meta information. If {@code null}, then default config for
-     * meta-cache will be used.
-     *
-     * Default configuration for the meta cache is:
-     * <ul>
-     *     <li>atomicityMode = TRANSACTIONAL</li>
-     *     <li>cacheMode = PARTITIONED</li>
-     *     <li>backups = 1</li>
-     * </ul>
-     *
-     * @param metaCacheCfg Cache configuration object.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setMetaCacheConfiguration(CacheConfiguration metaCacheCfg) {
-        this.metaCacheCfg = metaCacheCfg;
-
-        return this;
-    }
-
-    /**
-     * Cache config to store IGFS data.
-     *
-     * @return Cache configuration object.
-     */
-    @Nullable public CacheConfiguration getDataCacheConfiguration() {
-        return dataCacheCfg;
-    }
-
-    /**
-     * Cache config to store IGFS data. If {@code null}, then default config for
-     * data cache will be used.
-     *
-     * Default configuration for the data cache is:
-     * <ul>
-     *     <li>atomicityMode = TRANSACTIONAL</li>
-     *     <li>cacheMode = PARTITIONED</li>
-     *     <li>backups = 0</li>
-     * </ul>
-     *
-     * @param dataCacheCfg Cache configuration object.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setDataCacheConfiguration(CacheConfiguration dataCacheCfg) {
-        this.dataCacheCfg = dataCacheCfg;
-
-        return this;
-    }
-
-    /**
-     * Get file's data block size.
-     *
-     * @return File's data block size.
-     */
-    public int getBlockSize() {
-        return blockSize;
-    }
-
-    /**
-     * Sets file's data block size.
-     *
-     * @param blockSize File's data block size (bytes) or {@code 0} to reset default value.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setBlockSize(int blockSize) {
-        A.ensure(blockSize >= 0, "blockSize >= 0");
-
-        this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
-
-        return this;
-    }
-
-    /**
-     * Get number of pre-fetched blocks if specific file's chunk is requested.
-     *
-     * @return The number of pre-fetched blocks.
-     */
-    public int getPrefetchBlocks() {
-        return prefetchBlocks;
-    }
-
-    /**
-     * Sets the number of pre-fetched blocks if specific file's chunk is requested.
-     *
-     * @param prefetchBlocks New number of pre-fetched blocks.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setPrefetchBlocks(int prefetchBlocks) {
-        A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
-
-        this.prefetchBlocks = prefetchBlocks;
-
-        return this;
-    }
-
-    /**
-     * Get amount of sequential block reads before prefetch is triggered. The
-     * higher this value, the longer IGFS will wait before starting to prefetch
-     * values ahead of time. Depending on the use case, this can either help
-     * or hurt performance.
-     * <p>
-     * Default is {@code 0} which means that pre-fetching will start right away.
-     * <h1 class="header">Integration With Hadoop</h1>
-     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
-     * MapReduce task.
-     * <p>
-     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
-     *
-     * @return Amount of sequential block reads.
-     */
-    public int getSequentialReadsBeforePrefetch() {
-        return seqReadsBeforePrefetch;
-    }
-
-    /**
-     * Sets amount of sequential block reads before prefetch is triggered. The
-     * higher this value, the longer IGFS will wait before starting to prefetch
-     * values ahead of time. Depending on the use case, this can either help
-     * or hurt performance.
-     * <p>
-     * Default is {@code 0} which means that pre-fetching will start right away.
-     * <h1 class="header">Integration With Hadoop</h1>
-     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
-     * MapReduce task.
-     * <p>
-     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
-     *
-     * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
-        A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
-
-        this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
-
-        return this;
-    }
-
-    /**
-     * Get read/write buffer size for {@code IGFS} stream operations in bytes.
-     *
-     * @return Read/write buffers size (bytes).
-     */
-    public int getBufferSize() {
-        return bufSize;
-    }
-
-    /**
-     * Sets read/write buffers size for {@code IGFS} stream operations (bytes).
-     *
-     * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setBufferSize(int bufSize) {
-        A.ensure(bufSize >= 0, "bufSize >= 0");
-
-        this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
-
-        return this;
-    }
-
-    /**
-     * Gets number of file blocks buffered on local node before sending batch to remote node.
-     *
-     * @return Per node buffer size.
-     */
-    public int getPerNodeBatchSize() {
-        return perNodeBatchSize;
-    }
-
-    /**
-     * Sets number of file blocks collected on local node before sending batch to remote node.
-     *
-     * @param perNodeBatchSize Per node buffer size.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setPerNodeBatchSize(int perNodeBatchSize) {
-        this.perNodeBatchSize = perNodeBatchSize;
-
-        return this;
-    }
-
-    /**
-     * Gets number of batches that can be concurrently sent to remote node.
-     *
-     * @return Number of batches for each node.
-     */
-    public int getPerNodeParallelBatchCount() {
-        return perNodeParallelBatchCnt;
-    }
-
-    /**
-     * Sets number of file block batches that can be concurrently sent to remote node.
-     *
-     * @param perNodeParallelBatchCnt Per node parallel load operations.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
-        this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
-
-        return this;
-    }
-
-    /**
-     * Gets IPC endpoint configuration.
-     * <p>
-     * Endpoint is needed for communication between IGFS and {@code IgniteHadoopFileSystem} shipped with <b>Ignite
-     * Hadoop Accelerator</b>.
-     *
-     * @return IPC endpoint configuration.
-     */
-    @Nullable public IgfsIpcEndpointConfiguration getIpcEndpointConfiguration() {
-        return ipcEndpointCfg;
-    }
-
-    /**
-     * Sets IPC endpoint configuration.
-     * <p>
-     * Endpoint is needed for communication between IGFS and {@code IgniteHadoopFileSystem} shipped with <b>Ignite
-     * Hadoop Accelerator</b>.
-     *
-     * @param ipcEndpointCfg IPC endpoint configuration.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setIpcEndpointConfiguration(@Nullable IgfsIpcEndpointConfiguration ipcEndpointCfg) {
-        this.ipcEndpointCfg = ipcEndpointCfg;
-
-        return this;
-    }
-
-    /**
-     * Gets IPC endpoint enabled flag. If set to {@code true}, the endpoint will be created and bound to a specific
-     * port. Otherwise, the endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
-     * <p>
-     * Endpoint is needed for communication between IGFS and {@code IgniteHadoopFileSystem} shipped with <b>Ignite
-     * Hadoop Accelerator</b>.
-     *
-     * @return {@code True} in case endpoint is enabled.
-     */
-    public boolean isIpcEndpointEnabled() {
-        return ipcEndpointEnabled;
-    }
-
-    /**
-     * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
-     * <p>
-     * Endpoint is needed for communication between IGFS and {@code IgniteHadoopFileSystem} shipped with <b>Ignite
-     * Hadoop Accelerator</b>.
-     *
-     * @param ipcEndpointEnabled IPC endpoint enabled flag.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
-        this.ipcEndpointEnabled = ipcEndpointEnabled;
-
-        return this;
-    }
-
-    /**
-     * Gets port number for management endpoint. All IGFS nodes should have this port open
-     * for Visor Management Console to work with IGFS.
-     * <p>
-     * Default value is {@link #DFLT_MGMT_PORT}
-     *
-     * @return Port number or {@code -1} if management endpoint should be disabled.
-     */
-    public int getManagementPort() {
-        return mgmtPort;
-    }
-
-    /**
-     * Sets management endpoint port.
-     *
-     * @param mgmtPort port number or {@code -1} to disable management endpoint.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setManagementPort(int mgmtPort) {
-        this.mgmtPort = mgmtPort;
-
-        return this;
-    }
-
-    /**
-     * Gets mode to specify how {@code IGFS} interacts with Hadoop file system, like {@code HDFS}.
-     * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
-     * purposes.
-     * <p>
-     * Default mode is {@link org.apache.ignite.igfs.IgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
-     * not configured, this mode will work just like {@link org.apache.ignite.igfs.IgfsMode#PRIMARY} mode.
-     *
-     * @return Mode to specify how IGFS interacts with secondary HDFS file system.
-     */
-    public IgfsMode getDefaultMode() {
-        return dfltMode;
-    }
-
-    /**
-     * Sets {@code IGFS} mode to specify how it should interact with secondary
-     * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
-     * for pass-through, write-through, and read-through purposes.
-     *
-     * @param dfltMode {@code IGFS} mode.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setDefaultMode(IgfsMode dfltMode) {
-        this.dfltMode = dfltMode;
-
-        return this;
-    }
-
-    /**
-     * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
-     * and read-through purposes.
-     *
-     * @return Secondary file system.
-     */
-    public IgfsSecondaryFileSystem getSecondaryFileSystem() {
-        return secondaryFs;
-    }
-
-    /**
-     * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
-     * and read-through purposes.
-     *
-     * @param fileSystem Secondary file system.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setSecondaryFileSystem(IgfsSecondaryFileSystem fileSystem) {
-        secondaryFs = fileSystem;
-
-        return this;
-    }
-
-    /**
-     * Gets map of path prefixes to {@code IGFS} modes used for them.
-     * <p>
-     * If path doesn't correspond to any specified prefix or mappings are not provided, then
-     * {@link #getDefaultMode()} is used.
-     *
-     * @return Map of paths to {@code IGFS} modes.
-     */
-    @Nullable public Map<String, IgfsMode> getPathModes() {
-        return pathModes;
-    }
-
-    /**
-     * Sets map of path prefixes to {@code IGFS} modes used for them.
-     * <p>
-     * If path doesn't correspond to any specified prefix or mappings are not provided, then
-     * {@link #getDefaultMode()} is used.
-     *
-     * @param pathModes Map of paths to {@code IGFS} modes.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setPathModes(Map<String, IgfsMode> pathModes) {
-        this.pathModes = pathModes;
-
-        return this;
-    }
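
    For reference, a minimal sketch of how the default mode and a per-path override were typically
    combined on releases that still ship this API; the "/tmp/" prefix is purely illustrative:

        import java.util.HashMap;
        import java.util.Map;

        import org.apache.ignite.configuration.FileSystemConfiguration;
        import org.apache.ignite.igfs.IgfsMode;

        public class IgfsModeConfigSketch {
            /** Builds an illustrative IGFS configuration with a per-path mode override. */
            public static FileSystemConfiguration fileSystemConfiguration() {
                FileSystemConfiguration fsCfg = new FileSystemConfiguration();

                // Paths not matched below go through the secondary file system asynchronously.
                fsCfg.setDefaultMode(IgfsMode.DUAL_ASYNC);

                // Hypothetical prefix: keep temporary data purely in memory.
                Map<String, IgfsMode> pathModes = new HashMap<>();
                pathModes.put("/tmp/", IgfsMode.PRIMARY);

                fsCfg.setPathModes(pathModes);

                return fsCfg;
            }
        }
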
-
-    /**
-     * Gets the length of file chunk to send before delaying the fragmentizer.
-     *
-     * @return File chunk length in bytes.
-     */
-    public long getFragmentizerThrottlingBlockLength() {
-        return fragmentizerThrottlingBlockLen;
-    }
-
-    /**
-     * Sets the length of file chunk to send before delaying the fragmentizer.
-     *
-     * @param fragmentizerThrottlingBlockLen Block length in bytes.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
-        this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
-
-        return this;
-    }
-
-    /**
-     * Gets throttle delay for fragmentizer.
-     *
-     * @return Throttle delay in milliseconds.
-     */
-    public long getFragmentizerThrottlingDelay() {
-        return fragmentizerThrottlingDelay;
-    }
-
-    /**
-     * Sets delay in milliseconds for which fragmentizer is paused.
-     *
-     * @param fragmentizerThrottlingDelay Delay in milliseconds.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
-        this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
-
-        return this;
-    }
-
-    /**
-     * Gets number of files that can be processed by fragmentizer concurrently.
-     *
-     * @return Number of files to process concurrently.
-     */
-    public int getFragmentizerConcurrentFiles() {
-        return fragmentizerConcurrentFiles;
-    }
-
-    /**
-     * Sets number of files to process concurrently by fragmentizer.
-     *
-     * @param fragmentizerConcurrentFiles Number of files to process concurrently.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
-        this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
-
-        return this;
-    }
-
-    /**
-     * Gets flag indicating whether IGFS fragmentizer is enabled. If fragmentizer is disabled, files will be
-     * written in distributed fashion.
-     *
-     * @return Flag indicating whether fragmentizer is enabled.
-     */
-    public boolean isFragmentizerEnabled() {
-        return fragmentizerEnabled;
-    }
-
-    /**
-     * Sets property indicating whether fragmentizer is enabled.
-     *
-     * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setFragmentizerEnabled(boolean fragmentizerEnabled) {
-        this.fragmentizerEnabled = fragmentizerEnabled;
-
-        return this;
-    }
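
    The four fragmentizer properties above are usually tuned together. A short, illustrative
    fragment (assuming fsCfg is the FileSystemConfiguration being built, as in the earlier sketch):

        // Illustrative throttling: after each 16 MB chunk pause for 200 ms,
        // and let the fragmentizer work on at most 2 files at a time.
        fsCfg.setFragmentizerEnabled(true);
        fsCfg.setFragmentizerThrottlingBlockLength(16 * 1024 * 1024);
        fsCfg.setFragmentizerThrottlingDelay(200);
        fsCfg.setFragmentizerConcurrentFiles(2);
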
-
-    /**
-     * Get maximum default range size of a file being split during IGFS task execution. When an IGFS task is about to
-     * be executed, it requests file block locations first. Each location is defined as {@link org.apache.ignite.igfs.mapreduce.IgfsFileRange} which
-     * has a length. If this parameter is set to a positive value, IGFS will split a single file range into smaller
-     * ranges with length not greater than this parameter. The only exception is when the maximum task range
-     * length is smaller than the file block size; in this case the maximum task range size is overridden and set to the
-     * file block size.
-     * <p>
-     * Note that this parameter is applied when task is split into jobs before {@link org.apache.ignite.igfs.mapreduce.IgfsRecordResolver} is
-     * applied. Therefore, final file ranges being assigned to particular jobs could be greater than value of this
-     * parameter depending on file data layout and selected resolver type.
-     * <p>
-     * Setting this parameter might be useful when a file is highly colocated and has very long consecutive data chunks,
-     * so that task execution suffers from insufficient parallelism. E.g., if you have one IGFS node in the topology
-     * and want to process a 1Gb file, then only a single range of length 1Gb will be returned. This will result in
-     * a single job processed in one thread. But if you set the
-     * maximum range length to 16Mb, then 64 ranges will be returned, resulting in 64 jobs which could be executed in
-     * parallel.
-     * <p>
-     * Note that some {@code IgniteFs.execute()} methods can override value of this parameter.
-     * <p>
-     * In case value of this parameter is set to {@code 0} or negative value, it is simply ignored. Default value is
-     * {@code 0}.
-     *
-     * @return Maximum range size of a file being split during IGFS task execution.
-     */
-    public long getMaximumTaskRangeLength() {
-        return maxTaskRangeLen;
-    }
-
-    /**
-     * Set maximum default range size of a file being split during IGFS task execution.
-     * See {@link #getMaximumTaskRangeLength()} for more details.
-     *
-     * @param maxTaskRangeLen Maximum default range size of a file being split during IGFS task execution.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setMaximumTaskRangeLength(long maxTaskRangeLen) {
-        this.maxTaskRangeLen = maxTaskRangeLen;
-
-        return this;
-    }
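
    Continuing the same fragment, the 1Gb/16Mb example from the javadoc above translates into a
    single setter call (value illustrative):

        // With a 16 MB maximum range, a 1 GB file yields 64 ranges (and up to 64 parallel jobs)
        // even when a single node owns the whole file.
        fsCfg.setMaximumTaskRangeLength(16 * 1024 * 1024);
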
-
-    /**
-     * Get whether to co-locate metadata on a single node.
-     * <p>
-     * Normally Ignite spreads ownership of particular keys among all cache nodes. A transaction with keys owned by
-     * different nodes will produce more network traffic and will require more time to complete compared to
-     * a transaction with keys owned only by a single node.
-     * <p>
-     * IGFS stores information about file system structure (metadata) inside a transactional cache configured through
-     * {@link #getMetaCacheConfiguration()} property. Metadata updates caused by operations on IGFS usually require
-     * several internal keys to be updated. As IGFS metadata cache usually operates
-     * in {@link CacheMode#REPLICATED} mode, meaning that all nodes have all metadata locally, it makes sense to give
-     * a hint to Ignite to co-locate ownership of all metadata keys on a single node.
-     * This will decrease the number of network trips required to update metadata and hence could improve performance.
-     * <p>
-     * This property should be disabled if you see excessive CPU and network load on a single node, which
-     * degrades performance and cannot be explained by business logic of your application.
-     * <p>
-     * This setting is only used if the metadata cache is configured in {@link CacheMode#REPLICATED} mode. Otherwise it
-     * is ignored.
-     * <p>
-     * Defaults to {@link #DFLT_COLOCATE_META}.
-     *
-     * @return {@code True} if metadata co-location is enabled.
-     */
-    public boolean isColocateMetadata() {
-        return colocateMeta;
-    }
-
-    /**
-     * Set metadata co-location flag.
-     * <p>
-     * See {@link #isColocateMetadata()} for more information.
-     *
-     * @param colocateMeta Whether metadata co-location is enabled.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setColocateMetadata(boolean colocateMeta) {
-        this.colocateMeta = colocateMeta;
-
-        return this;
-    }
-
-    /**
-     * Get relaxed consistency flag.
-     * <p>
-     * Concurrent file system operations might conflict with each other. E.g. {@code move("/a1/a2", "/b")} and
-     * {@code move("/b", "/a1")}. Hence, it is necessary to atomically verify that participating paths are still
-     * on their places to keep file system in consistent state in such cases. These checks are expensive in
-     * distributed environment.
-     * <p>
-     * Real applications, e.g. Hadoop jobs, rarely produce conflicting operations. So additional checks could be
-     * skipped in these scenarios without any negative effect on file system integrity. It significantly increases
-     * performance of file system operations.
-     * <p>
-     * If the value of this flag is {@code true}, IGFS will skip expensive consistency checks. It is recommended to set
-     * this flag to {@code false} if your application has conflicting operations, or you do not know exactly how users
-     * will use your system.
-     * <p>
-     * This property affects only {@link IgfsMode#PRIMARY} paths.
-     * <p>
-     * Defaults to {@link #DFLT_RELAXED_CONSISTENCY}.
-     *
-     * @return {@code True} if relaxed consistency is enabled.
-     */
-    public boolean isRelaxedConsistency() {
-        return relaxedConsistency;
-    }
-
-    /**
-     * Set relaxed consistency flag.
-     * <p>
-     * See {@link #isRelaxedConsistency()} for more information.
-     *
-     * @param relaxedConsistency Whether to use relaxed consistency optimization.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setRelaxedConsistency(boolean relaxedConsistency) {
-        this.relaxedConsistency = relaxedConsistency;
-
-        return this;
-    }
-
-    /**
-     * Get whether to update file length on flush.
-     * <p>
-     * Controls whether to update file length or not when the {@link IgfsOutputStream#flush()} method is invoked. You
-     * may want to set this property to {@code true} if you want to read from a file which is being written at the
-     * same time.
-     * <p>
-     * Defaults to {@link #DFLT_UPDATE_FILE_LEN_ON_FLUSH}.
-     *
-     * @return Whether to update file length on flush.
-     */
-    public boolean isUpdateFileLengthOnFlush() {
-        return updateFileLenOnFlush;
-    }
-
-    /**
-     * Set whether to update file length on flush.
-     * <p>
-     * See {@link #isUpdateFileLengthOnFlush()} for more information.
-     *
-     * @param updateFileLenOnFlush Whether to update file length on flush.
-     * @return {@code this} for chaining.
-     */
-    public FileSystemConfiguration setUpdateFileLengthOnFlush(boolean updateFileLenOnFlush) {
-        this.updateFileLenOnFlush = updateFileLenOnFlush;
-
-        return this;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(FileSystemConfiguration.class, this);
-    }
-}
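
    Putting a few of the properties above together, a minimal, illustrative node-start sketch; it
    omits the cache configurations and other properties a real setup would also tune:

        import org.apache.ignite.Ignite;
        import org.apache.ignite.Ignition;
        import org.apache.ignite.configuration.FileSystemConfiguration;
        import org.apache.ignite.configuration.IgniteConfiguration;

        public class IgfsNodeStartSketch {
            public static void main(String[] args) {
                FileSystemConfiguration fsCfg = new FileSystemConfiguration();

                // Keep metadata ownership on one node (useful when the metadata cache is REPLICATED)
                // and skip the expensive concurrent-modification checks.
                fsCfg.setColocateMetadata(true);
                fsCfg.setRelaxedConsistency(true);

                // Let readers observe the growing length of a file that is still being written.
                fsCfg.setUpdateFileLengthOnFlush(true);

                IgniteConfiguration cfg = new IgniteConfiguration()
                    .setFileSystemConfiguration(fsCfg);

                try (Ignite ignite = Ignition.start(cfg)) {
                    // The started node exposes the configured in-memory file system.
                }
            }
        }
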
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
deleted file mode 100644
index 653125d..0000000
--- a/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.configuration;
-
-import org.apache.ignite.hadoop.HadoopMapReducePlanner;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.lifecycle.LifecycleBean;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Ignite Hadoop Accelerator configuration.
- */
-public class HadoopConfiguration {
-    /** Default finished job info time-to-live. */
-    public static final long DFLT_FINISHED_JOB_INFO_TTL = 30_000;
-
-    /** Default value for external execution flag. */
-    public static final boolean DFLT_EXTERNAL_EXECUTION = false;
-
-    /** Default value for the max parallel tasks. */
-    public static final int DFLT_MAX_PARALLEL_TASKS = Runtime.getRuntime().availableProcessors() * 2;
-
-    /** Default value for the max task queue size. */
-    public static final int DFLT_MAX_TASK_QUEUE_SIZE = 8192;
-
-    /** Map reduce planner. */
-    private HadoopMapReducePlanner planner;
-
-    /** */
-    private boolean extExecution = DFLT_EXTERNAL_EXECUTION;
-
-    /** Finished job info TTL. */
-    private long finishedJobInfoTtl = DFLT_FINISHED_JOB_INFO_TTL;
-
-    /** */
-    private int maxParallelTasks = DFLT_MAX_PARALLEL_TASKS;
-
-    /** */
-    private int maxTaskQueueSize = DFLT_MAX_TASK_QUEUE_SIZE;
-
-    /** Library names. */
-    private String[] libNames;
-
-    /**
-     * Default constructor.
-     */
-    public HadoopConfiguration() {
-        // No-op.
-    }
-
-    /**
-     * Copy constructor.
-     *
-     * @param cfg Configuration to copy.
-     */
-    public HadoopConfiguration(HadoopConfiguration cfg) {
-        // Preserve alphabetic order.
-        // TODO: IGNITE-404: Uncomment when fixed.
-        //extExecution = cfg.isExternalExecution();
-        finishedJobInfoTtl = cfg.getFinishedJobInfoTtl();
-        planner = cfg.getMapReducePlanner();
-        maxParallelTasks = cfg.getMaxParallelTasks();
-        maxTaskQueueSize = cfg.getMaxTaskQueueSize();
-        libNames = cfg.getNativeLibraryNames();
-    }
-
-    /**
-     * Gets max number of local tasks that may be executed in parallel.
-     *
-     * @return Max number of local tasks that may be executed in parallel.
-     */
-    public int getMaxParallelTasks() {
-        return maxParallelTasks;
-    }
-
-    /**
-     * Sets max number of local tasks that may be executed in parallel.
-     *
-     * @param maxParallelTasks Max number of local tasks that may be executed in parallel.
-     * @return {@code this} for chaining.
-     */
-    public HadoopConfiguration setMaxParallelTasks(int maxParallelTasks) {
-        this.maxParallelTasks = maxParallelTasks;
-
-        return this;
-    }
-
-    /**
-     * Gets max task queue size.
-     *
-     * @return Max task queue size.
-     */
-    public int getMaxTaskQueueSize() {
-        return maxTaskQueueSize;
-    }
-
-    /**
-     * Sets max task queue size.
-     *
-     * @param maxTaskQueueSize Max task queue size.
-     * @return {@code this} for chaining.
-     */
-    public HadoopConfiguration setMaxTaskQueueSize(int maxTaskQueueSize) {
-        this.maxTaskQueueSize = maxTaskQueueSize;
-
-        return this;
-    }
-
-    /**
-     * Gets finished job info time-to-live in milliseconds.
-     *
-     * @return Finished job info time-to-live.
-     */
-    public long getFinishedJobInfoTtl() {
-        return finishedJobInfoTtl;
-    }
-
-    /**
-     * Sets finished job info time-to-live.
-     *
-     * @param finishedJobInfoTtl Finished job info time-to-live.
-     * @return {@code this} for chaining.
-     */
-    public HadoopConfiguration setFinishedJobInfoTtl(long finishedJobInfoTtl) {
-        this.finishedJobInfoTtl = finishedJobInfoTtl;
-
-        return this;
-    }
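
    For reference, a short sketch of tuning the pool, queue and TTL properties above and attaching
    the result to an IgniteConfiguration (setHadoopConfiguration appears later in this diff); all
    values are illustrative:

        import org.apache.ignite.configuration.HadoopConfiguration;
        import org.apache.ignite.configuration.IgniteConfiguration;

        public class HadoopConfigSketch {
            public static IgniteConfiguration igniteConfiguration() {
                HadoopConfiguration hadoopCfg = new HadoopConfiguration();

                // Illustrative values: cap concurrent tasks at 4, queue at most 1024 pending tasks,
                // and keep finished-job metadata for one minute.
                hadoopCfg.setMaxParallelTasks(4);
                hadoopCfg.setMaxTaskQueueSize(1024);
                hadoopCfg.setFinishedJobInfoTtl(60_000);

                return new IgniteConfiguration().setHadoopConfiguration(hadoopCfg);
            }
        }
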
-
-    /**
-     * Gets external task execution flag. If {@code true}, hadoop job tasks will be executed in an external
-     * (relative to node) process.
-     *
-     * @return {@code True} if external execution.
-     */
-    // TODO: IGNITE-404: Uncomment when fixed.
-//    public boolean isExternalExecution() {
-//        return extExecution;
-//    }
-
-    /**
-     * Sets external task execution flag.
-     *
-     * @param extExecution {@code True} if tasks should be executed in an external process.
-     * @see #isExternalExecution()
-     * @return {@code this} for chaining.
-     */
-    // TODO: IGNITE-404: Uncomment when fixed.
-//
-//    public HadoopConfiguration setExternalExecution(boolean extExecution) {
-//        this.extExecution = extExecution;
-//
-//        return this;
-//    }
-
-    /**
-     * Gets Hadoop map-reduce planner, a component which defines job execution plan based on job
-     * configuration and current grid topology.
-     *
-     * @return Map-reduce planner.
-     */
-    public HadoopMapReducePlanner getMapReducePlanner() {
-        return planner;
-    }
-
-    /**
-     * Sets Hadoop map-reduce planner, a component which defines job execution plan based on job
-     * configuration and current grid topology.
-     *
-     * @param planner Map-reduce planner.
-     * @return {@code this} for chaining.
-     */
-    public HadoopConfiguration setMapReducePlanner(HadoopMapReducePlanner planner) {
-        this.planner = planner;
-
-        return this;
-    }
-
-    /**
-     * Get native library names.
-     * <p>
-     * Ignite Hadoop Accelerator executes all Hadoop jobs and tasks in the same process, isolating them with the help
-     * of classloaders. If a Hadoop job or task loads a native library, it might lead to an exception, because Java does
-     * not allow the same library to be loaded multiple times from different classloaders. To overcome the problem,
-     * you should do the following:
-     * <ul>
-     *     <li>Load necessary libraries in advance from the base classloader; {@link LifecycleBean} is a good candidate
-     *     for this;</li>
-     *     <li>Add names of loaded libraries to this property, so that the Hadoop engine is able to link them;</li>
-     *     <li>Remove {@link System#load(String)} and {@link System#loadLibrary(String)} calls from your job/task.</li>
-     * </ul>
-     *
-     * @return Native library names.
-     */
-    @Nullable public String[] getNativeLibraryNames() {
-        return libNames;
-    }
-
-    /**
-     * Set native library names. See {@link #getNativeLibraryNames()} for more information.
-     *
-     * @param libNames Native library names.
-     * @return {@code this} for chaining.
-     */
-    public HadoopConfiguration setNativeLibraryNames(@Nullable String... libNames) {
-        this.libNames = libNames;
-
-        return this;
-    }
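
    The native-library workflow described above can be sketched as follows. The library name
    "mynative" is hypothetical, and setLifecycleBeans / LifecycleEventType.BEFORE_NODE_START are
    assumed from the regular Ignite public API rather than taken from this diff:

        import org.apache.ignite.configuration.HadoopConfiguration;
        import org.apache.ignite.configuration.IgniteConfiguration;
        import org.apache.ignite.lifecycle.LifecycleBean;
        import org.apache.ignite.lifecycle.LifecycleEventType;

        public class NativeLibSketch {
            /** Loads the native library once, from the base classloader, before the node starts. */
            static class NativeLibLoader implements LifecycleBean {
                @Override public void onLifecycleEvent(LifecycleEventType evt) {
                    if (evt == LifecycleEventType.BEFORE_NODE_START)
                        System.loadLibrary("mynative"); // Hypothetical library name.
                }
            }

            public static IgniteConfiguration igniteConfiguration() {
                HadoopConfiguration hadoopCfg = new HadoopConfiguration()
                    .setNativeLibraryNames("mynative"); // Let the Hadoop engine link the preloaded library.

                return new IgniteConfiguration()
                    .setLifecycleBeans(new NativeLibLoader())
                    .setHadoopConfiguration(hadoopCfg);
            }
        }
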
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopConfiguration.class, this);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
index 9ead827..71c0535 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
@@ -300,9 +300,6 @@ public class IgniteConfiguration {
     /** Management pool size. */
     private int mgmtPoolSize = DFLT_MGMT_THREAD_CNT;
 
-    /** IGFS pool size. */
-    private int igfsPoolSize = AVAILABLE_PROC_CNT;
-
     /** Data stream pool size. */
     private int dataStreamerPoolSize = DFLT_DATA_STREAMER_POOL_SIZE;
 
@@ -503,15 +500,9 @@ public class IgniteConfiguration {
     /** Local event listeners. */
     private Map<IgnitePredicate<? extends Event>, int[]> lsnrs;
 
-    /** IGFS configuration. */
-    private FileSystemConfiguration[] igfsCfg;
-
     /** Service configuration. */
     private ServiceConfiguration[] svcCfgs;
 
-    /** Hadoop configuration. */
-    private HadoopConfiguration hadoopCfg;
-
     /** Client access configuration. */
     private ConnectorConfiguration connectorCfg = new ConnectorConfiguration();
 
@@ -677,9 +668,6 @@ public class IgniteConfiguration {
         discoStartupDelay = cfg.getDiscoveryStartupDelay();
         execCfgs = cfg.getExecutorConfiguration();
         failureDetectionTimeout = cfg.getFailureDetectionTimeout();
-        hadoopCfg = cfg.getHadoopConfiguration();
-        igfsCfg = cfg.getFileSystemConfiguration();
-        igfsPoolSize = cfg.getIgfsThreadPoolSize();
         failureHnd = cfg.getFailureHandler();
         igniteHome = cfg.getIgniteHome();
         igniteInstanceName = cfg.getIgniteInstanceName();
@@ -1047,17 +1035,6 @@ public class IgniteConfiguration {
     }
 
     /**
-     * Size of thread pool that is in charge of processing outgoing IGFS messages.
-     * <p>
-     * If not provided, the executor service will have size equal to the number of processors available in the system.
-     *
-     * @return Thread pool size to be used for IGFS outgoing message sending.
-     */
-    public int getIgfsThreadPoolSize() {
-        return igfsPoolSize;
-    }
-
-    /**
      * Size of thread pool that is in charge of processing data stream messages.
      * <p>
      * If not provided, executor service will have size {@link #DFLT_DATA_STREAMER_POOL_SIZE}.
@@ -1269,19 +1246,6 @@ public class IgniteConfiguration {
     }
 
     /**
-     * Set thread pool size that will be used to process outgoing IGFS messages.
-     *
-     * @param poolSize Executor service to use for outgoing IGFS messages.
-     * @see IgniteConfiguration#getIgfsThreadPoolSize()
-     * @return {@code this} for chaining.
-     */
-    public IgniteConfiguration setIgfsThreadPoolSize(int poolSize) {
-        igfsPoolSize = poolSize;
-
-        return this;
-    }
-
-    /**
      * Set thread pool size that will be used to process data stream messages.
      *
      * @param poolSize Executor service to use for data stream messages.
@@ -3067,48 +3031,6 @@ public class IgniteConfiguration {
     }
 
     /**
-     * Gets IGFS (Ignite In-Memory File System) configurations.
-     *
-     * @return IGFS configurations.
-     */
-    public FileSystemConfiguration[] getFileSystemConfiguration() {
-        return igfsCfg;
-    }
-
-    /**
-     * Sets IGFS (Ignite In-Memory File System) configurations.
-     *
-     * @param igfsCfg IGFS configurations.
-     * @return {@code this} for chaining.
-     */
-    public IgniteConfiguration setFileSystemConfiguration(FileSystemConfiguration... igfsCfg) {
-        this.igfsCfg = igfsCfg;
-
-        return this;
-    }
-
-    /**
-     * Gets hadoop configuration.
-     *
-     * @return Hadoop configuration.
-     */
-    public HadoopConfiguration getHadoopConfiguration() {
-        return hadoopCfg;
-    }
-
-    /**
-     * Sets hadoop configuration.
-     *
-     * @param hadoopCfg Hadoop configuration.
-     * @return {@code this} for chaining.
-     */
-    public IgniteConfiguration setHadoopConfiguration(HadoopConfiguration hadoopCfg) {
-        this.hadoopCfg = hadoopCfg;
-
-        return this;
-    }
-
-    /**
      * @return Connector configuration.
      */
     public ConnectorConfiguration getConnectorConfiguration() {
diff --git a/modules/core/src/main/java/org/apache/ignite/events/EventType.java b/modules/core/src/main/java/org/apache/ignite/events/EventType.java
index fca1d02..5df3c81 100644
--- a/modules/core/src/main/java/org/apache/ignite/events/EventType.java
+++ b/modules/core/src/main/java/org/apache/ignite/events/EventType.java
@@ -46,7 +46,6 @@ import org.apache.ignite.spi.eventstorage.memory.MemoryEventStorageSpi;
  * <li>{@link #EVTS_DISCOVERY}</li>
  * <li>{@link #EVTS_DISCOVERY_ALL}</li>
  * <li>{@link #EVTS_ERROR}</li>
- * <li>{@link #EVTS_IGFS}</li>
  * <li>{@link #EVTS_JOB_EXECUTION}</li>
  * <li>{@link #EVTS_TASK_EXECUTION}</li>
  * </ul>
@@ -658,150 +657,6 @@ public interface EventType {
     public static final int EVT_CACHE_NODES_LEFT = 100;
 
     /**
-     * Built-in event type: IGFS file created.
-     * <p>
-     * Fired when IGFS component creates new file.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_CREATED = 116;
-
-    /**
-     * Built-in event type: IGFS file renamed.
-     * <p>
-     * Fired when IGFS component renames an existing file.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_RENAMED = 117;
-
-    /**
-     * Built-in event type: IGFS file deleted.
-     * <p>
-     * Fired when IGFS component deletes a file.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_DELETED = 118;
-
-    /**
-     * Built-in event type: IGFS file opened for reading.
-     * <p>
-     * Fired when IGFS file is opened for reading.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_OPENED_READ = 119;
-
-    /**
-     * Built-in event type: IGFS file opened for writing.
-     * <p>
-     * Fired when IGFS file is opened for writing.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_OPENED_WRITE = 120;
-
-    /**
-     * Built-in event type: IGFS file or directory metadata updated.
-     * <p>
-     * Fired when IGFS file or directory metadata is updated.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_META_UPDATED = 121;
-
-    /**
-     * Built-in event type: IGFS file closed.
-     * <p>
-     * Fired when IGFS file is closed.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_CLOSED_WRITE = 122;
-
-    /**
-     * Built-in event type: IGFS file closed.
-     * <p>
-     * Fired when IGFS file is closed.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_CLOSED_READ = 123;
-
-    /**
-     * Built-in event type: IGFS directory created.
-     * <p>
-     * Fired when IGFS component creates new directory.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_DIR_CREATED = 124;
-
-    /**
-     * Built-in event type: IGFS directory renamed.
-     * <p>
-     * Fired when IGFS component renames an existing directory.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_DIR_RENAMED = 125;
-
-    /**
-     * Built-in event type: IGFS directory deleted.
-     * <p>
-     * Fired when IGFS component deletes a directory.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_DIR_DELETED = 126;
-
-    /**
-     * Built-in event type: IGFS file purged.
-     * <p>
-     * Fired when IGFS file data was actually removed from cache.
-     * <p>
-     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
-     * internal Ignite events and should not be used by user-defined events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int EVT_IGFS_FILE_PURGED = 127;
-
-    /**
      * Built-in event type: WAL segment movement to archive folder completed
      * <p>
      * Fired for each completed WAL segment which was moved to archive
@@ -1203,28 +1058,6 @@ public interface EventType {
     };
 
     /**
-     * All IGFS events. This array can be directly passed into
-     * {@link IgniteEvents#localListen(IgnitePredicate, int...)} method to
-     * subscribe to all IGFS events.
-     *
-     * @see IgfsEvent
-     */
-    public static final int[] EVTS_IGFS = {
-        EVT_IGFS_FILE_CREATED,
-        EVT_IGFS_FILE_RENAMED,
-        EVT_IGFS_FILE_DELETED,
-        EVT_IGFS_FILE_OPENED_READ,
-        EVT_IGFS_FILE_OPENED_WRITE,
-        EVT_IGFS_FILE_CLOSED_WRITE,
-        EVT_IGFS_FILE_CLOSED_READ,
-        EVT_IGFS_FILE_PURGED,
-        EVT_IGFS_META_UPDATED,
-        EVT_IGFS_DIR_CREATED,
-        EVT_IGFS_DIR_RENAMED,
-        EVT_IGFS_DIR_DELETED,
-    };
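
    For reference, subscribing to these (now removed) events typically looked like the sketch
    below; it assumes a node was already started with the IGFS event types included in its
    IgniteConfiguration:

        import org.apache.ignite.Ignite;
        import org.apache.ignite.Ignition;
        import org.apache.ignite.events.Event;
        import org.apache.ignite.events.EventType;
        import org.apache.ignite.lang.IgnitePredicate;

        public class IgfsEventListenerSketch {
            public static void main(String[] args) {
                // Assumes a node has already been started with IGFS events enabled.
                Ignite ignite = Ignition.ignite();

                ignite.events().localListen((IgnitePredicate<Event>)evt -> {
                    System.out.println("IGFS event: " + evt.name());

                    return true; // Return true to keep the listener subscribed.
                }, EventType.EVTS_IGFS);
            }
        }
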
-
-    /**
      * All Transaction events. This array can be directly passed into
      * {@link IgniteEvents#localListen(IgnitePredicate, int...)} method to
      * subscribe to all transaction events.
diff --git a/modules/core/src/main/java/org/apache/ignite/events/IgfsEvent.java b/modules/core/src/main/java/org/apache/ignite/events/IgfsEvent.java
deleted file mode 100644
index 7ab973c..0000000
--- a/modules/core/src/main/java/org/apache/ignite/events/IgfsEvent.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.events;
-
-import java.util.Map;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.events.EventType.EVT_IGFS_DIR_CREATED;
-import static org.apache.ignite.events.EventType.EVT_IGFS_DIR_DELETED;
-import static org.apache.ignite.events.EventType.EVT_IGFS_DIR_RENAMED;
-
-/**
- * IGFS event.
- * <p>
- * Grid events are used for notification about what happens within the grid. Note that by
- * design Ignite keeps all events generated on the local node locally and it provides
- * APIs for performing distributed queries across multiple nodes:
- * <ul>
- *      <li>
- *          {@link org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate, long, int...)} -
- *          asynchronously querying events that occurred on the specified nodes, including remote nodes.
- *      </li>
- *      <li>
- *          {@link org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate, int...)} -
- *          querying only local events stored on this local node.
- *      </li>
- *      <li>
- *          {@link org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate, int...)} -
- *          listening to local grid events (events from remote nodes not included).
- *      </li>
- * </ul>
- * User can also wait for events using method {@link org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate, int...)}.
- * <h1 class="header">Events and Performance</h1>
- * Note that by default all events in Ignite are enabled and therefore generated and stored
- * by whatever event storage SPI is configured. Ignite can and often does generate thousands of events per second
- * under load, and this creates a significant additional load on the system. If these events are
- * not needed by the application, this load is unnecessary and leads to significant performance degradation.
- * <p>
- * It is <b>highly recommended</b> to enable only those events that your application logic requires
- * by using {@link org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} method in Ignite configuration. Note that certain
- * events are required for Ignite's internal operations and such events will still be generated but not stored by
- * event storage SPI if they are disabled in Ignite configuration.
- *
- * @see EventType#EVT_IGFS_FILE_CREATED
- * @see EventType#EVT_IGFS_FILE_RENAMED
- * @see EventType#EVT_IGFS_FILE_DELETED
- */
-public class IgfsEvent extends EventAdapter {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** File path. */
-    private final IgfsPath path;
-
-    /** New file path (for RENAME event). */
-    private IgfsPath newPath;
-
-    /** Data size (for data transfer events). */
-    private long dataSize;
-
-    /** Updated metadata properties (for metadata update events). */
-    @GridToStringInclude
-    private Map<String, String> meta;
-
-    /**
-     * Constructs an event instance.
-     *
-     * @param path File or directory path.
-     * @param node Node.
-     * @param type Event type.
-     */
-    public IgfsEvent(IgfsPath path, ClusterNode node, int type) {
-        super(node, "IGFS event.", type);
-
-        this.path = path;
-    }
-
-    /**
-     * Constructs an event instance for path modification event
-     * ({@link EventType#EVT_IGFS_FILE_RENAMED},
-     * {@link EventType#EVT_IGFS_DIR_RENAMED}).
-     *
-     * @param path File or directory path.
-     * @param newPath New file or directory path.
-     * @param node Node.
-     * @param type Event type.
-     */
-    public IgfsEvent(IgfsPath path, IgfsPath newPath, ClusterNode node, int type) {
-        this(path, node, type);
-
-        this.newPath = newPath;
-    }
-
-    /**
-     * Constructs an event instance for close events:
-     * ({@link EventType#EVT_IGFS_FILE_CLOSED_READ},
-     * {@link EventType#EVT_IGFS_FILE_CLOSED_WRITE}).
-     *
-     * @param path File path.
-     * @param node Node.
-     * @param type Event type.
-     * @param dataSize Transferred data size in bytes.
-     */
-    public IgfsEvent(IgfsPath path, ClusterNode node, int type, long dataSize) {
-        this(path, node, type);
-
-        this.dataSize = dataSize;
-    }
-
-    /**
-     * Constructs an event instance for file metadata update events
-     * ({@link EventType#EVT_IGFS_META_UPDATED}).
-     *
-     * @param path File path.
-     * @param node Node.
-     * @param type Event type.
-     * @param meta Modified properties.
-     */
-    public IgfsEvent(IgfsPath path, ClusterNode node, int type, Map<String, String> meta) {
-        this(path, node, type);
-
-        this.meta = meta;
-    }
-
-    /**
-     * Path of the file or directory, on which event has occurred.
-     *
-     * @return File path.
-     */
-    public IgfsPath path() {
-        return path;
-    }
-
-    /**
-     * New file or directory path for this event (used in
-     * {@link EventType#EVT_IGFS_FILE_RENAMED} event).
-     *
-     * @return New file or directory path or {@code null},
-     *         if not relevant for this event.
-     */
-    @Nullable public IgfsPath newPath() {
-        return newPath;
-    }
-
-    /**
-     * Transferred data size for this event.
-     *
-     * @return Transferred data size in bytes.
-     */
-    public long dataSize() {
-        return dataSize;
-    }
-
-    /**
-     * Updated file metadata properties.
-     *
-     * @return Updated metadata properties or {@code null},
-     *         if not relevant for this event.
-     */
-    @Nullable public Map<String, String> updatedMeta() {
-        return meta;
-    }
-
-    /**
-     * Checks if this is a directory-related event.
-     *
-     * @return {@code True} if this event is directory-related.
-     */
-    public boolean isDirectory() {
-        int t = type();
-
-        return t == EVT_IGFS_DIR_CREATED || t == EVT_IGFS_DIR_RENAMED || t == EVT_IGFS_DIR_DELETED;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsEvent.class, this, super.toString());
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopInputSplit.java b/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopInputSplit.java
deleted file mode 100644
index b02a116..0000000
--- a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopInputSplit.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop;
-
-import java.io.Externalizable;
-
-/**
- * Abstract fragment of an input data source.
- */
-public abstract class HadoopInputSplit implements Externalizable {
-    /** */
-    protected String[] hosts;
-
-    /**
-     * Array of hosts where this input split resides.
-     *
-     * @return Hosts.
-     */
-    public String[] hosts() {
-        assert hosts != null;
-
-        return hosts;
-    }
-
-    /**
-     * This method must be implemented for the purposes of the internal implementation.
-     *
-     * @param obj Another object.
-     * @return {@code true} If objects are equal.
-     */
-    @Override public abstract boolean equals(Object obj);
-
-    /**
-     * This method must be implemented for the purposes of the internal implementation.
-     *
-     * @return Hash code of the object.
-     */
-    @Override public abstract int hashCode();
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopJob.java b/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopJob.java
deleted file mode 100644
index 8ee0330..0000000
--- a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopJob.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.hadoop;
-
-import java.util.Collection;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Compact job description.
- */
-public interface HadoopJob {
-    /**
-     * Gets collection of input splits for this job.
-     *
-     * @return Input splits.
-     */
-    public Collection<HadoopInputSplit> input();
-
-    /**
-     * Gets optional configuration property for the job.
-     *
-     * @param name Property name.
-     * @return Value or {@code null} if none.
-     */
-    @Nullable String property(String name);
-
-    /**
-     * Checks whether job has combiner.
-     *
-     * @return {@code true} If job has combiner.
-     */
-    boolean hasCombiner();
-
-    /**
-     * Checks whether job has reducer.
-     * Actual number of reducers will be in {@link HadoopMapReducePlan#reducers()}.
-     *
-     * @return {@code True} if job has a reducer.
-     */
-    boolean hasReducer();
-
-    /**
-     * @return Number of reducers configured for job.
-     */
-    int reducers();
-
-    /**
-     * Gets job name.
-     *
-     * @return Job name.
-     */
-    String jobName();
-
-    /**
-     * Gets user name.
-     *
-     * @return User name.
-     */
-    String user();
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopMapReducePlan.java b/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopMapReducePlan.java
deleted file mode 100644
index 8e5ba16..0000000
--- a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopMapReducePlan.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop;
-
-import java.io.Serializable;
-import java.util.Collection;
-import java.util.UUID;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Map-reduce job execution plan.
- */
-public interface HadoopMapReducePlan extends Serializable {
-    /**
-     * Gets collection of file blocks for which mappers should be executed.
-     *
-     * @param nodeId Node ID to check.
-     * @return Collection of file blocks or {@code null} if no mappers should be executed on given node.
-     */
-    @Nullable public Collection<HadoopInputSplit> mappers(UUID nodeId);
-
-    /**
-     * Gets reducer IDs that should be started on given node.
-     *
-     * @param nodeId Node ID to check.
-     * @return Array of reducer IDs.
-     */
-    @Nullable public int[] reducers(UUID nodeId);
-
-    /**
-     * Gets collection of all node IDs involved in map part of job execution.
-     *
-     * @return Collection of node IDs.
-     */
-    public Collection<UUID> mapperNodeIds();
-
-    /**
-     * Gets collection of all node IDs involved in reduce part of job execution.
-     *
-     * @return Collection of node IDs.
-     */
-    public Collection<UUID> reducerNodeIds();
-
-    /**
-     * Gets overall number of mappers for the job.
-     *
-     * @return Number of mappers.
-     */
-    public int mappers();
-
-    /**
-     * Gets overall number of reducers for the job.
-     *
-     * @return Number of reducers.
-     */
-    public int reducers();
-
-    /**
-     * Gets node ID for reducer.
-     *
-     * @param reducer Reducer.
-     * @return Node ID.
-     */
-    public UUID nodeForReducer(int reducer);
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopMapReducePlanner.java b/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopMapReducePlanner.java
deleted file mode 100644
index 61aa287..0000000
--- a/modules/core/src/main/java/org/apache/ignite/hadoop/HadoopMapReducePlanner.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop;
-
-import java.util.Collection;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.cluster.ClusterNode;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Map-reduce execution planner.
- */
-public interface HadoopMapReducePlanner {
-    /**
-     * Prepares map-reduce execution plan for the given job and topology.
-     *
-     * @param job Job.
-     * @param top Topology.
-     * @param oldPlan Old plan in case of partial failure.
-     * @return Map reduce plan.
-     * @throws IgniteCheckedException If an error occurs.
-     */
-    public HadoopMapReducePlan preparePlan(HadoopJob job, Collection<ClusterNode> top,
-        @Nullable HadoopMapReducePlan oldPlan) throws IgniteCheckedException;
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/hadoop/package-info.java b/modules/core/src/main/java/org/apache/ignite/hadoop/package-info.java
deleted file mode 100644
index 2343b81..0000000
--- a/modules/core/src/main/java/org/apache/ignite/hadoop/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Contains Hadoop Accelerator APIs.
- */
-
-package org.apache.ignite.hadoop;
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
deleted file mode 100644
index 79e0cbe..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.util.Collection;
-import java.util.UUID;
-
-/**
- * {@code IGFS} file's data block location in the grid. It is used to determine
- * node affinity of a certain file block within the Grid by calling
- * {@link org.apache.ignite.IgniteFileSystem#affinity(IgfsPath, long, long)} method.
- */
-public interface IgfsBlockLocation {
-    /**
-     * Start position in the file this block relates to.
-     *
-     * @return Start position in the file this block relates to.
-     */
-    public long start();
-
-    /**
-     * Length of the data block in the file.
-     *
-     * @return Length of the data block in the file.
-     */
-    public long length();
-
-    /**
-     * Nodes this block belongs to. The first node id in the collection is
-     * the primary node id.
-     *
-     * @return Nodes this block belongs to.
-     */
-    public Collection<UUID> nodeIds();
-
-    /**
-     * Compliant with Hadoop interface.
-     *
-     * @return Collection of host:port addresses.
-     */
-    public Collection<String> names();
-
-    /**
-     * Compliant with Hadoop interface.
-     *
-     * @return Collection of host names.
-     */
-    public Collection<String> hosts();
-}
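
    A short sketch of how block locations were typically obtained via the affinity(IgfsPath, long,
    long) method referenced above; the file system name, path and range are illustrative, and
    Ignite.fileSystem(String) is assumed from the removed IgniteFileSystem API rather than shown in
    this hunk:

        import java.util.Collection;

        import org.apache.ignite.Ignite;
        import org.apache.ignite.IgniteFileSystem;
        import org.apache.ignite.igfs.IgfsBlockLocation;
        import org.apache.ignite.igfs.IgfsPath;

        public class IgfsAffinitySketch {
            public static void printAffinity(Ignite ignite) {
                IgniteFileSystem fs = ignite.fileSystem("igfs"); // Illustrative file system name.
                IgfsPath path = new IgfsPath("/my/working/dir/file.txt");

                // Locations of the first 1024 bytes of the file.
                Collection<IgfsBlockLocation> locs = fs.affinity(path, 0, 1024);

                for (IgfsBlockLocation loc : locs)
                    System.out.println("Block [start=" + loc.start() + ", len=" + loc.length() +
                        ", nodes=" + loc.nodeIds() + ']');
            }
        }
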
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsConcurrentModificationException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsConcurrentModificationException.java
deleted file mode 100644
index eb7591d..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsConcurrentModificationException.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} exception indicating that file system structure was modified concurrently. This error
- * indicates that an operation performed in DUAL mode cannot proceed due to these changes.
- */
-public class IgfsConcurrentModificationException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsConcurrentModificationException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsConcurrentModificationException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    public IgfsConcurrentModificationException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsCorruptedFileException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsCorruptedFileException.java
deleted file mode 100644
index 9e870a0..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsCorruptedFileException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Exception thrown when target file's block is not found in data cache.
- */
-public class IgfsCorruptedFileException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsCorruptedFileException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsCorruptedFileException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    public IgfsCorruptedFileException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsDirectoryNotEmptyException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsDirectoryNotEmptyException.java
deleted file mode 100644
index 0080844..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsDirectoryNotEmptyException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Exception indicating that a directory cannot be deleted because it is not empty.
- */
-public class IgfsDirectoryNotEmptyException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsDirectoryNotEmptyException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsDirectoryNotEmptyException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    public IgfsDirectoryNotEmptyException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsException.java
deleted file mode 100644
index 511ddac..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsException.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.ignite.IgniteException;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} exception thrown by file system components.
- */
-public class IgfsException extends IgniteException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    public IgfsException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
deleted file mode 100644
index 6b3d95d..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.util.Map;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} file or directory descriptor. For example, to get information about
- * a file you would use the following code:
- * <pre name="code" class="java">
- *     IgfsPath filePath = new IgfsPath("/my/working/dir/file.txt");
- *
- *     // Get metadata about file.
- *     IgfsFile file = igfs.info(filePath);
- * </pre>
- */
-public interface IgfsFile {
-    /**
-     * Gets path to file.
-     *
-     * @return Path to file.
-     */
-    public IgfsPath path();
-
-    /**
-     * Checks whether this file is a data file.
-     *
-     * @return {@code True} if this is a data file.
-     */
-    public boolean isFile();
-
-    /**
-     * Checks whether this file is a directory.
-     *
-     * @return {@code True} if this is a directory.
-     */
-    public boolean isDirectory();
-
-    /**
-     * Gets file's length.
-     *
-     * @return File's length or {@code zero} for directories.
-     */
-    public long length();
-
-    /**
-     * Gets file's data block size.
-     *
-     * @return File's data block size or {@code zero} for directories.
-     */
-    public int blockSize();
-
-    /**
-     * Gets file group block size (i.e. block size * group size).
-     *
-     * @return File group block size.
-     */
-    public long groupBlockSize();
-
-    /**
-     * Gets file last access time. File last access time is not updated automatically due to
-     * performance considerations and can be updated on demand with
-     * {@link org.apache.ignite.IgniteFileSystem#setTimes(IgfsPath, long, long)} method.
-     * <p>
-     * By default last access time equals file creation time.
-     *
-     * @return Last access time.
-     */
-    public long accessTime();
-
-    /**
-     * Gets file last modification time. File modification time is updated automatically on each file write and
-     * append.
-     *
-     * @return Last modification time.
-     */
-    public long modificationTime();
-
-    /**
-     * Get file's property for specified name.
-     *
-     * @param name Name of the property.
-     * @return File's property for specified name.
-     * @throws IllegalArgumentException If requested property was not found.
-     */
-    public String property(String name) throws IllegalArgumentException;
-
-    /**
-     * Get file's property for specified name.
-     *
-     * @param name Name of the property.
-     * @param dfltVal Default value if requested property was not found.
-     * @return File's property for specified name.
-     */
-    @Nullable public String property(String name, @Nullable String dfltVal);
-
-    /**
-     * Get properties of the file.
-     *
-     * @return Properties of the file.
-     */
-    public Map<String, String> properties();
-}
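For reference, a minimal usage sketch of the removed IgfsFile descriptor. It assumes the pre-removal Ignite#fileSystem(String) accessor; the IGFS instance name "myIgfs" and the property name "ownerName" are illustrative assumptions, while the info() call follows the javadoc example above.

    // Hedged sketch of the removed IgfsFile API; instance and property names are assumptions.
    import java.util.Map;
    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.igfs.IgfsFile;
    import org.apache.ignite.igfs.IgfsPath;

    public class IgfsFileInfoSketch {
        public static void main(String[] args) {
            try (Ignite ignite = Ignition.start()) {
                IgniteFileSystem fs = ignite.fileSystem("myIgfs"); // Instance name is an assumption.

                // Get metadata about a file, as in the IgfsFile javadoc.
                IgfsFile file = fs.info(new IgfsPath("/my/working/dir/file.txt"));

                if (file != null && file.isFile()) {
                    System.out.println("Length: " + file.length() + ", block size: " + file.blockSize());

                    // Property lookup with a default avoids IllegalArgumentException for missing keys.
                    String owner = file.property("ownerName", "unknown"); // Property name is hypothetical.

                    Map<String, String> props = file.properties();
                    System.out.println("Owner: " + owner + ", all properties: " + props);
                }
            }
        }
    }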
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsGroupDataBlocksKeyMapper.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsGroupDataBlocksKeyMapper.java
deleted file mode 100644
index c05e0b4..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsGroupDataBlocksKeyMapper.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.ignite.internal.processors.cache.GridCacheDefaultAffinityKeyMapper;
-import org.apache.ignite.internal.processors.igfs.IgfsBaseBlockKey;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.lang.IgniteUuid;
-
-/**
- * {@code IGFS} class providing the ability to group a file's data blocks together on one node.
- * All blocks within the same group are guaranteed to be cached together on the same node.
- * Group size parameter controls how many sequential blocks will be cached together on the same node.
- * <p>
- * For example, if block size is {@code 64kb} and group size is {@code 256}, then each group will contain
- * {@code 64kb * 256 = 16Mb}. Larger group sizes reduce the number of splits required to run map-reduce
- * tasks, but increase the imbalance of data sizes stored on different nodes.
- * <p>
- * Note that {@link #getGroupSize()} parameter must correlate to Hadoop split size parameter defined
- * in Hadoop via {@code mapred.max.split.size} property. Ideally you want all blocks accessed
- * within one split to be mapped to {@code 1} group, so they can be located on the same grid node.
- * For example, default Hadoop split size is {@code 64mb} and default {@code IGFS} block size
- * is {@code 64kb}. This means that to make sure that each split goes only through blocks on
- * the same node (without hopping between nodes over network), we have to make the {@link #getGroupSize()}
- * value be equal to {@code 64mb / 64kb = 1024}.
- * <p>
- * The {@code IGFS} data cache must be configured with this mapper. Here is an
- * example of how it can be specified in XML configuration:
- * <pre name="code" class="xml">
- * &lt;bean id="cacheCfgBase" class="org.apache.ignite.cache.CacheConfiguration" abstract="true"&gt;
- *     ...
- *     &lt;property name="affinityMapper"&gt;
- *         &lt;bean class="org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper"&gt;
- *             &lt;!-- How many sequential blocks will be stored on the same node. --&gt;
- *             &lt;property name="groupSize" value="512"/&gt;
- *         &lt;/bean&gt;
- *     &lt;/property&gt;
- *     ...
- * &lt;/bean&gt;
- * </pre>
- */
-public class IgfsGroupDataBlocksKeyMapper extends GridCacheDefaultAffinityKeyMapper {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Default group size.*/
-    public static final int DFLT_GRP_SIZE = 1024;
-
-    /** Size of the group. */
-    private int grpSize = DFLT_GRP_SIZE;
-
-    /**
-     * Default constructor.
-     */
-    public IgfsGroupDataBlocksKeyMapper() {
-        // No-op.
-    }
-
-    /**
-     * Constructs affinity mapper to group several data blocks with the same key.
-     *
-     * @param grpSize Size of the group in blocks.
-     */
-    public IgfsGroupDataBlocksKeyMapper(int grpSize) {
-        A.ensure(grpSize >= 1, "grpSize >= 1");
-
-        this.grpSize = grpSize;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object affinityKey(Object key) {
-        if (key instanceof IgfsBaseBlockKey) {
-            IgfsBaseBlockKey blockKey = (IgfsBaseBlockKey)key;
-
-            IgniteUuid affKey = blockKey.affinityKey();
-
-            if (affKey != null)
-                return affKey;
-
-            long grpId = blockKey.blockId() / grpSize;
-
-            return blockKey.fileHash() + (int)(grpId ^ (grpId >>> 32));
-        }
-
-        return super.affinityKey(key);
-    }
-
-    /**
-     * Get group size.
-     * <p>
-     * Group size defines how many sequential file blocks will reside on the same node. This parameter
-     * must correlate to Hadoop split size parameter defined in Hadoop via {@code mapred.max.split.size}
-     * property. Ideally you want all blocks accessed within one split to be mapped to {@code 1} group,
-     * so they can be located on the same grid node. For example, default Hadoop split size is {@code 64mb}
-     * and default {@code IGFS} block size is {@code 64kb}. This means that to make sure that each split
-     * goes only through blocks on the same node (without hopping between nodes over network), we have to
-     * make the group size be equal to {@code 64mb / 64kb = 1024}.
-     * <p>
-     * Defaults to {@link #DFLT_GRP_SIZE}.
-     *
-     * @return Group size.
-     */
-    public int getGroupSize() {
-        return grpSize;
-    }
-
-    /**
-     * Set group size. See {@link #getGroupSize()} for more information.
-     *
-     * @param grpSize Group size.
-     * @return {@code this} for chaining.
-     */
-    public IgfsGroupDataBlocksKeyMapper setGroupSize(int grpSize) {
-        this.grpSize = grpSize;
-
-        return this;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsGroupDataBlocksKeyMapper.class, this);
-    }
-}
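The grouping arithmetic described in the javadoc above can be illustrated in isolation. This is a standalone sketch of the mapping rule mirrored from the removed affinityKey() implementation; the fileHash value is a stand-in for IgfsBaseBlockKey.fileHash().

    // Standalone sketch of the removed mapper's rule: sequential blocks whose blockId
    // falls into the same (blockId / groupSize) bucket map to the same affinity key.
    public class BlockGroupingSketch {
        public static void main(String[] args) {
            int blockSize = 64 * 1024; // 64kb IGFS block, as in the javadoc example.
            int grpSize = 1024;        // 64mb Hadoop split / 64kb block = 1024.

            System.out.println("Bytes per group: " + (long)blockSize * grpSize); // 67108864 = 64mb.

            int fileHash = 42; // Stand-in for IgfsBaseBlockKey.fileHash().

            for (long blockId : new long[] {0, 1023, 1024, 2048}) {
                long grpId = blockId / grpSize;

                // Same mixing as the deleted affinityKey() method.
                int affKey = fileHash + (int)(grpId ^ (grpId >>> 32));

                System.out.println("block " + blockId + " -> group " + grpId + ", affinity key " + affKey);
            }
        }
    }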
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
deleted file mode 100644
index 65f8436..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * {@code IGFS} input stream to read data from the file system.
- * It provides several additional methods for asynchronous access.
- */
-public abstract class IgfsInputStream extends InputStream {
-    /**
-     * Gets the file length as of the moment the file was opened.
-     *
-     * @return File length.
-     */
-    public abstract long length();
-
-    /**
-     * Seek to the specified position.
-     *
-     * @param pos Position to seek to.
-     * @throws IOException In case of IO exception.
-     */
-    public abstract void seek(long pos) throws IOException;
-
-    /**
-     * Get the current position in the input stream.
-     *
-     * @return The current position in the input stream.
-     * @throws IOException In case of IO exception.
-     */
-    public abstract long position() throws IOException;
-
-    /**
-     * Read bytes from the given position in the stream to the given buffer.
-     * Continues to read until passed buffer becomes filled.
-     *
-     * @param pos Position in the input stream to seek.
-     * @param buf Buffer into which data is read.
-     * @throws IOException In case of IO exception.
-     */
-    public abstract void readFully(long pos, byte[] buf) throws IOException;
-
-    /**
-     * Reads bytes from the given position until the requested portion of the buffer is filled.
-     * @param pos Position in the input stream to seek.
-     * @param buf Buffer into which data is read.
-     * @param off Offset in the buffer from which stream data should be written.
-     * @param len The number of bytes to read.
-     * @throws IOException In case of IO exception.
-     */
-    public abstract void readFully(long pos, byte[] buf, int off, int len) throws IOException;
-
-    /**
-     * Reads up to {@code len} bytes from the given position into the buffer.
-     * @param pos Position in the input stream to seek.
-     * @param buf Buffer into which data is read.
-     * @param off Offset in the buffer from which stream data should be written.
-     * @param len The number of bytes to read.
-     * @return Total number of bytes read into the buffer, or -1 if there is no more data (EOF).
-     * @throws IOException In case of IO exception.
-     */
-    public abstract int read(long pos, byte[] buf, int off, int len) throws IOException;
-}
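A brief sketch of the positional-read contract documented above. It assumes an IGFS instance named "myIgfs" and the pre-removal IgniteFileSystem#open(IgfsPath) accessor; the path is illustrative.

    import java.io.IOException;
    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.igfs.IgfsInputStream;
    import org.apache.ignite.igfs.IgfsPath;

    public class IgfsPositionalReadSketch {
        public static void main(String[] args) throws IOException {
            try (Ignite ignite = Ignition.start()) {
                IgniteFileSystem fs = ignite.fileSystem("myIgfs"); // Instance name is an assumption.

                try (IgfsInputStream in = fs.open(new IgfsPath("/dir/file.txt"))) {
                    byte[] buf = new byte[128];

                    // Blocks until the whole buffer is filled, starting at byte offset 1024.
                    in.readFully(1024, buf);

                    // Returns the number of bytes actually read, or -1 at EOF.
                    int n = in.read(2048, buf, 0, buf.length);

                    System.out.println("length=" + in.length() + ", read " + n + " bytes at offset 2048");
                }
            }
        }
    }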
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInvalidHdfsVersionException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInvalidHdfsVersionException.java
deleted file mode 100644
index f6ea680..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInvalidHdfsVersionException.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Exception thrown when Ignite detects that the remote HDFS version differs from the version of the HDFS
- * libraries in the Ignite classpath.
- */
-public class IgfsInvalidHdfsVersionException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsInvalidHdfsVersionException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsInvalidHdfsVersionException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    public IgfsInvalidHdfsVersionException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInvalidPathException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInvalidPathException.java
deleted file mode 100644
index 0c1fa7a..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInvalidPathException.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} exception indicating that the operation target is invalid
- * (e.g. the target was expected to be a file but is not).
- */
-public class IgfsInvalidPathException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsInvalidPathException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsInvalidPathException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    public IgfsInvalidPathException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsIpcEndpointConfiguration.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsIpcEndpointConfiguration.java
deleted file mode 100644
index c53bf41..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsIpcEndpointConfiguration.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import static org.apache.ignite.igfs.IgfsIpcEndpointType.SHMEM;
-import static org.apache.ignite.igfs.IgfsIpcEndpointType.TCP;
-
-/**
- * IGFS IPC endpoint configuration.
- */
-public class IgfsIpcEndpointConfiguration {
-    /** Default endpoint type is TCP. */
-    public static IgfsIpcEndpointType DFLT_TYPE = U.hasSharedMemory() ? SHMEM : TCP;
-
-    /** Default host. */
-    public static String DFLT_HOST = "127.0.0.1";
-
-    /** Default port. */
-    public static int DFLT_PORT = 10500;
-
-    /** Default shared memory space in bytes. */
-    public static final int DFLT_MEM_SIZE = 256 * 1024;
-
-    /**
-     * Default token directory. Note that this path is relative to {@code IGNITE_HOME/work} folder
-     * if {@code IGNITE_HOME} system or environment variable specified, otherwise it is relative to
-     * {@code work} folder under system {@code java.io.tmpdir} folder.
-     *
-     * @see IgniteConfiguration#getWorkDirectory()
-     */
-    public static final String DFLT_TOKEN_DIR_PATH = "ipc/shmem";
-
-    /** Default threads count. */
-    public static final int DFLT_THREAD_CNT = IgniteConfiguration.AVAILABLE_PROC_CNT;
-
-    /** Endpoint type. */
-    private IgfsIpcEndpointType type = DFLT_TYPE;
-
-    /** Host. */
-    private String host = DFLT_HOST;
-
-    /** Port. */
-    private int port = DFLT_PORT;
-
-    /** Space size. */
-    private int memSize = DFLT_MEM_SIZE;
-
-    /** Token directory path. */
-    private String tokenDirPath = DFLT_TOKEN_DIR_PATH;
-
-    /** Thread count. */
-    private int threadCnt = DFLT_THREAD_CNT;
-
-    /**
-     * Default constructor.
-     */
-    public IgfsIpcEndpointConfiguration() {
-        // No-op.
-    }
-
-    /**
-     * Copying constructor.
-     *
-     * @param cfg Configuration to copy.
-     */
-    public IgfsIpcEndpointConfiguration(IgfsIpcEndpointConfiguration cfg) {
-        type = cfg.getType();
-        host = cfg.getHost();
-        port = cfg.getPort();
-        memSize = cfg.getMemorySize();
-        tokenDirPath = cfg.getTokenDirectoryPath();
-    }
-
-    /**
-     * Gets the endpoint type. There are two endpoint types: {@code SHMEM} working over shared memory, and {@code TCP}
-     * working over sockets.
-     * <p>
-     * Shared memory is the recommended approach for Linux-based systems. On Windows, TCP is the only available option.
-     * <p>
-     * Defaults to {@link #DFLT_TYPE}.
-     *
-     * @return Endpoint type.
-     */
-    public IgfsIpcEndpointType getType() {
-        return type;
-    }
-
-    /**
-     * Sets the endpoint type. There are two endpoint types: {@link IgfsIpcEndpointType#SHMEM} working over shared memory,
-     * and {@link IgfsIpcEndpointType#TCP} working over sockets.
-     * <p>
-     * Shared memory is the recommended approach for Linux-based systems. On Windows, TCP is the only available option.
-     * <p>
-     * Defaults to {@link #DFLT_TYPE}.
-     *
-     * @param type Endpoint type.
-     * @return {@code this} for chaining.
-     */
-    public IgfsIpcEndpointConfiguration setType(IgfsIpcEndpointType type) {
-        this.type = type;
-
-        return this;
-    }
-
-    /**
-     * Gets the host endpoint is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#TCP} endpoint this is the network interface server socket is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#SHMEM} endpoint socket connection is needed only to perform an initial handshake.
-     * All further communication is performed over shared memory. Therefore, for {@code SHMEM} this value is ignored
-     * and socket will be always bound to {@link #DFLT_HOST}.
-     * <p>
-     * Defaults to {@link #DFLT_HOST}.
-     *
-     * @return Host.
-     */
-    public String getHost() {
-        return host;
-    }
-
-    /**
-     * Sets the host endpoint is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#TCP} endpoint this is the network interface server socket is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#SHMEM} endpoint socket connection is needed only to perform an initial handshake.
-     * All further communication is performed over shared memory. Therefore, for {@code SHMEM} this value is ignored
-     * and socket will be always bound to {@link #DFLT_HOST}.
-     * <p>
-     * Defaults to {@link #DFLT_HOST}.
-     *
-     * @param host Host.
-     * @return {@code this} for chaining.
-     */
-    public IgfsIpcEndpointConfiguration setHost(String host) {
-        this.host = host;
-
-        return this;
-    }
-
-    /**
-     * Gets the port endpoint is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#TCP} endpoint this is the port server socket is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#SHMEM} endpoint socket connection is needed only to perform an initial handshake.
-     * All further communication is performed over shared memory.
-     * <p>
-     * Defaults to {@link #DFLT_PORT}.
-     *
-     * @return Port.
-     */
-    public int getPort() {
-        return port;
-    }
-
-    /**
-     * Sets the port endpoint is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#TCP} endpoint this is the port server socket is bound to.
-     * <p>
-     * For {@link IgfsIpcEndpointType#SHMEM} endpoint socket connection is needed only to perform an initial handshake.
-     * All further communication is performed over shared memory.
-     * <p>
-     * Defaults to {@link #DFLT_PORT}.
-     *
-     * @param port Port.
-     * @return {@code this} for chaining.
-     */
-    public IgfsIpcEndpointConfiguration setPort(int port) {
-        this.port = port;
-
-        return this;
-    }
-
-    /**
-     * Gets shared memory size in bytes allocated for endpoint communication.
-     * <p>
-     * Ignored for {@link IgfsIpcEndpointType#TCP} endpoint.
-     * <p>
-     * Defaults to {@link #DFLT_MEM_SIZE}.
-     *
-     * @return Shared memory size.
-     */
-    public int getMemorySize() {
-        return memSize;
-    }
-
-    /**
-     * Sets shared memory size in bytes allocated for endpoint communication.
-     * <p>
-     * Ignored for {@link IgfsIpcEndpointType#TCP} endpoint.
-     * <p>
-     * Defaults to {@link #DFLT_MEM_SIZE}.
-     *
-     * @param memSize Shared memory size.
-     * @return {@code this} for chaining.
-     */
-    public IgfsIpcEndpointConfiguration setMemorySize(int memSize) {
-        this.memSize = memSize;
-
-        return this;
-    }
-
-    /**
-     * Gets directory where shared memory tokens are stored.
-     * <p>
-     * Note that this path is relative to {@code IGNITE_HOME/work} folder if {@code IGNITE_HOME} system or environment
-     * variable specified, otherwise it is relative to {@code work} folder under system {@code java.io.tmpdir} folder.
-     * <p>
-     * Ignored for {@link IgfsIpcEndpointType#TCP} endpoint.
-     * <p>
-     * Defaults to {@link #DFLT_TOKEN_DIR_PATH}.
-     *
-     * @return Directory where shared memory tokens are stored.
-     */
-    public String getTokenDirectoryPath() {
-        return tokenDirPath;
-    }
-
-    /**
-     * Sets directory where shared memory tokens are stored.
-     * <p>
-     * Note that this path is relative to {@code IGNITE_HOME/work} folder if {@code IGNITE_HOME} system or environment
-     * variable specified, otherwise it is relative to {@code work} folder under system {@code java.io.tmpdir} folder.
-     * <p>
-     * Ignored for {@link IgfsIpcEndpointType#TCP} endpoint.
-     * <p>
-     * Defaults to {@link #DFLT_TOKEN_DIR_PATH}.
-     *
-     * @param tokenDirPath Directory where shared memory tokens are stored.
-     * @return {@code this} for chaining.
-     */
-    public IgfsIpcEndpointConfiguration setTokenDirectoryPath(String tokenDirPath) {
-        this.tokenDirPath = tokenDirPath;
-
-        return this;
-    }
-
-    /**
-     * Get number of threads used by this endpoint to process incoming requests.
-     * <p>
-     * Defaults to {@link #DFLT_THREAD_CNT}.
-     *
-     * @return Number of threads used by this endpoint to process incoming requests.
-     */
-    public int getThreadCount() {
-        return threadCnt;
-    }
-
-    /**
-     * Set number of threads used by this endpoint to process incoming requests.
-     * <p>
-     * See {@link #getThreadCount()} for more information.
-     *
-     * @param threadCnt Number of threads used by this endpoint to process incoming requests.
-     * @return {@code this} for chaining.
-     */
-    public IgfsIpcEndpointConfiguration setThreadCount(int threadCnt) {
-        this.threadCnt = threadCnt;
-
-        return this;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsIpcEndpointConfiguration.class, this);
-    }
-}
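A minimal configuration sketch using only the setters shown in the removed class. The port and thread count values are illustrative; attaching the resulting endpoint to the IGFS FileSystemConfiguration is not shown, since that wiring lives outside this file.

    import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
    import org.apache.ignite.igfs.IgfsIpcEndpointType;

    public class IpcEndpointConfigSketch {
        public static void main(String[] args) {
            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration()
                .setType(IgfsIpcEndpointType.TCP) // TCP is the only option on Windows.
                .setHost("127.0.0.1")             // DFLT_HOST
                .setPort(10500)                   // DFLT_PORT
                .setThreadCount(4);               // Illustrative value.

            // memSize and tokenDirPath keep their defaults; both are ignored for TCP endpoints.
            System.out.println(endpointCfg);
        }
    }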
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsIpcEndpointType.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsIpcEndpointType.java
deleted file mode 100644
index a7ecaa2..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsIpcEndpointType.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-/**
- * IGFS endpoint type.
- */
-public enum IgfsIpcEndpointType {
-    /** Shared memory endpoint. */
-    SHMEM,
-
-    /** TCP endpoint. */
-    TCP
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
deleted file mode 100644
index f3b734b..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.ignite.configuration.DataRegionConfiguration;
-
-/**
- * {@code IGFS} metrics snapshot for the file system. Note that some metrics are global and
- * some are local (i.e. per each node).
- */
-public interface IgfsMetrics {
-    /**
-     * Gets local used space in bytes. This is the sum of all file chunks stored on local node.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Node used space in bytes.
-     */
-    public long localSpaceSize();
-
-    /**
-     * Gets the maximum amount of data that can be stored on the local node. This metric is related
-     * to the {@link DataRegionConfiguration#getMaxSize()} of the IGFS data cache.
-     *
-     * @return Maximum IGFS local space size.
-     */
-    public long maxSpaceSize();
-
-    /**
-     * Gets space in bytes used in the secondary file system.
-     * <p>
-     * This is a global metric.
-     *
-     * @return Used space in the secondary file system or {@code 0} in case no secondary file system is configured.
-     */
-    public long secondarySpaceSize();
-
-    /**
-     * Gets number of directories created in file system.
-     * <p>
-     * This is a global metric.
-     *
-     * @return Number of directories.
-     */
-    public int directoriesCount();
-
-    /**
-     * Gets number of files stored in file system.
-     * <p>
-     * This is a global metric.
-     *
-     * @return Number of files.
-     */
-    public int filesCount();
-
-    /**
-     * Gets number of files that are currently opened for reading.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Number of opened files.
-     */
-    public int filesOpenedForRead();
-
-    /**
-     * Gets number of files that are currently opened for writing.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Number of opened files.
-     */
-    public int filesOpenedForWrite();
-
-    /**
-     * Gets total blocks read, local and remote.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total blocks read.
-     */
-    public long blocksReadTotal();
-
-    /**
-     * Gets total remote blocks read.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total blocks remote read.
-     */
-    public long blocksReadRemote();
-
-    /**
-     * Gets total blocks written, local and remote.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total blocks written.
-     */
-    public long blocksWrittenTotal();
-
-    /**
-     * Gets total remote blocks written.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total blocks written.
-     */
-    public long blocksWrittenRemote();
-
-    /**
-     * Gets total bytes read.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total bytes read.
-     */
-    public long bytesRead();
-
-    /**
-     * Gets total bytes read time.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total bytes read time.
-     */
-    public long bytesReadTime();
-
-    /**
-     * Gets total bytes written.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total bytes written.
-     */
-    public long bytesWritten();
-
-    /**
-     * Gets total bytes write time.
-     * <p>
-     * This is a local metric.
-     *
-     * @return Total bytes write time.
-     */
-    public long bytesWriteTime();
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsOutOfSpaceException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsOutOfSpaceException.java
deleted file mode 100644
index 2b3ed53..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsOutOfSpaceException.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} exception thrown when an out-of-space condition is detected.
- * It is thrown when the amount of data written to {@code IGFS} data nodes exceeds
- * the maximum value configured per node.
- */
-public class IgfsOutOfSpaceException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsOutOfSpaceException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsOutOfSpaceException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    public IgfsOutOfSpaceException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsOutputStream.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsOutputStream.java
deleted file mode 100644
index 3575284..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsOutputStream.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * {@code IGFS} output stream to write data into the file system.
- */
-public abstract class IgfsOutputStream extends OutputStream {
-    /**
-     * Transfers the specified number of bytes from the data input to this output stream.
-     * This method is optimized to avoid unnecessary temporary buffer creation and byte array copies.
-     *
-     * @param in Data input to copy bytes from.
-     * @param len Data length to copy.
-     * @throws IOException If the write failed, the read from the input failed, or there is not enough data in the data input.
-     */
-    public abstract void transferFrom(DataInput in, int len) throws IOException;
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsParentNotDirectoryException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsParentNotDirectoryException.java
deleted file mode 100644
index 8d6040c..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsParentNotDirectoryException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Exception thrown when a parent that is supposed to be a directory is in fact a file.
- */
-public class IgfsParentNotDirectoryException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsParentNotDirectoryException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsParentNotDirectoryException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg   Message.
-     * @param cause Cause.
-     */
-    public IgfsParentNotDirectoryException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPath.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPath.java
deleted file mode 100644
index 1e49fe6..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPath.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.ignite.binary.BinaryObjectException;
-import org.apache.ignite.binary.BinaryRawReader;
-import org.apache.ignite.binary.BinaryRawWriter;
-import org.apache.ignite.binary.BinaryReader;
-import org.apache.ignite.binary.BinaryWriter;
-import org.apache.ignite.binary.Binarylizable;
-import org.apache.ignite.internal.util.io.GridFilenameUtils;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} path to file in the file system. For example, to get information about
- * a file you would use the following code:
- * <pre name="code" class="java">
- *     IgfsPath dirPath = new IgfsPath("/my/working/dir");
- *     IgfsPath filePath = new IgfsPath(dirPath, "file.txt");
- *
- *     // Get metadata about file.
- *     IgfsFile file = igfs.info(filePath);
- * </pre>
- */
-public final class IgfsPath implements Comparable<IgfsPath>, Externalizable, Binarylizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** The directory separator character. */
-    private static final char SLASH_CHAR = '/';
-
-    /** The directory separator. */
-    public static final String SLASH = "/";
-
-    /** URI representing this path. Should never change after object creation or de-serialization. */
-    private String path;
-
-    /** Root path. */
-    public static final IgfsPath ROOT = new IgfsPath(SLASH);
-
-    /**
-     * Default constructor.
-     */
-    public IgfsPath() {
-        path = SLASH;
-    }
-
-    /**
-     * Constructs a path from a URI.
-     *
-     * @param uri URI to create path from.
-     */
-    public IgfsPath(URI uri) {
-        A.notNull(uri, "uri");
-
-        path = normalizePath(uri.getPath());
-    }
-
-    /**
-     * Constructs a path from the URI string.
-     *
-     * @param path URI string.
-     */
-    public IgfsPath(String path) {
-        A.ensure(!F.isEmpty(path), "'path' is null or empty");
-
-        this.path = normalizePath(path);
-    }
-
-    /**
-     * Resolve a child path against a parent path.
-     *
-     * @param parentPath Parent path.
-     * @param childPath Child path.
-     */
-    public IgfsPath(IgfsPath parentPath, String childPath) {
-        A.notNull(parentPath, "parentPath");
-
-        String path = GridFilenameUtils.concat(parentPath.path, childPath);
-
-        if (F.isEmpty(path))
-            throw new IllegalArgumentException("Failed to parse path" +
-                " [parent=" + parentPath + ", childPath=" + childPath + ']');
-
-        this.path = normalizePath(path);
-    }
-
-    /**
-     * Normalizes the given path: the result is (1) non-null, (2) normalized, (3) absolute and (4) in Unix format.
-     *
-     * @param path Path.
-     * @return Normalized path.
-     */
-    private static String normalizePath(String path) {
-        assert path != null;
-
-        String normalizedPath = GridFilenameUtils.normalizeNoEndSeparator(path, true);
-
-        if (F.isEmpty(normalizedPath))
-            throw new IllegalArgumentException("Failed to normalize path: " + path);
-
-        if (!SLASH.equals(GridFilenameUtils.getPrefix(normalizedPath)))
-            throw new IllegalArgumentException("Path should be absolute: " + path);
-
-        assert !normalizedPath.isEmpty() : "Expected a non-empty normalized path.";
-        assert normalizedPath.length() == 1 || !normalizedPath.endsWith(SLASH) :
-            "Expected the normalized path to be the root or not to end with the '/' symbol.";
-
-        return normalizedPath;
-    }
-
-    /**
-     * Returns the final component of this path.
-     *
-     * @return The final component of this path.
-     */
-    public String name() {
-        return GridFilenameUtils.getName(path);
-    }
-
-    /**
-     * Split full path on components.
-     *
-     * @return Path components.
-     */
-    public List<String> components() {
-        String path = this.path;
-
-        assert path.length() >= 1 : "Path expected to be absolute: " + path;
-
-        // Path is a short-lived object, so there is no need to cache the component resolution result.
-        return path.length() == 1 ? Collections.<String>emptyList() : Arrays.asList(path.substring(1).split(SLASH));
-    }
-
-    /**
-     * Get components in array form.
-     *
-     * @return Components array.
-     */
-    public String[] componentsArray() {
-        return path.length() == 1 ? new String[0] : path.substring(1).split(SLASH);
-    }
-
-    /**
-     * Returns the parent of a path or {@code null} if at root.
-     *
-     * @return The parent of a path or {@code null} if at root.
-     */
-    @Nullable public IgfsPath parent() {
-        String path = this.path;
-
-        if (path.length() == 1)
-            return null; // Current path is root.
-
-        path = GridFilenameUtils.getFullPathNoEndSeparator(path);
-
-        return new IgfsPath(path);
-    }
-
-    /**
-     * Adds a suffix to the final name in the path.
-     *
-     * @param suffix Suffix.
-     * @return Path with suffix.
-     */
-    public IgfsPath suffix(String suffix) {
-        A.ensure(!F.isEmpty(suffix), "'suffix' is null or empty.");
-        A.ensure(!suffix.contains(SLASH), "'suffix' contains file's separator '" + SLASH + "'");
-
-        return new IgfsPath(path + suffix);
-    }
-
-    /**
-     * Return the number of elements in this path.
-     *
-     * @return The number of elements in this path; a depth of zero means the root directory.
-     */
-    public int depth() {
-        final String path = this.path;
-        final int size = path.length();
-
-        assert size >= 1 && path.charAt(0) == SLASH_CHAR : "Expects absolute path: " + path;
-
-        if (size == 1)
-            return 0;
-
-        int depth = 1;
-
-        // Ignore the first character.
-        for (int i = 1; i < size; i++)
-            if (path.charAt(i) == SLASH_CHAR)
-                depth++;
-
-        return depth;
-    }
-
-    /**
-     * Checks whether this path is a sub-directory of argument.
-     *
-     * @param path Path to check.
-     * @return {@code True} if argument is same or a sub-directory of this object.
-     */
-    public boolean isSubDirectoryOf(IgfsPath path) {
-        A.notNull(path, "path");
-
-        return this.path.startsWith(path.path.endsWith(SLASH) ? path.path : path.path + SLASH);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int compareTo(IgfsPath o) {
-        return path.compareTo(o.path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        U.writeString(out, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException {
-        path = U.readString(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriter writer) throws BinaryObjectException {
-        writeRawBinary(writer.rawWriter());
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReader reader) throws BinaryObjectException {
-        readRawBinary(reader.rawReader());
-    }
-
-    /**
-     * Write raw binary.
-     *
-     * @param writer Raw writer.
-     * @throws BinaryObjectException If failed.
-     */
-    public void writeRawBinary(BinaryRawWriter writer) throws BinaryObjectException {
-        writer.writeString(path);
-    }
-
-    /**
-     * Read raw binary.
-     *
-     * @param reader Raw reader.
-     * @throws BinaryObjectException If failed.
-     */
-    public void readRawBinary(BinaryRawReader reader) throws BinaryObjectException {
-        path = reader.readString();
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return path.hashCode();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        return o == this || o != null && getClass() == o.getClass() && path.equals(((IgfsPath)o).path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return path;
-    }
-}
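A short sketch of the path operations defined above; the expected outputs follow from the deleted implementation and are shown as comments.

    import java.util.List;
    import org.apache.ignite.igfs.IgfsPath;

    public class IgfsPathSketch {
        public static void main(String[] args) {
            IgfsPath dir = new IgfsPath("/my/working/dir");
            IgfsPath file = new IgfsPath(dir, "file.txt");

            System.out.println(file);                       // /my/working/dir/file.txt
            System.out.println(file.name());                // file.txt
            System.out.println(file.depth());               // 4
            System.out.println(file.parent());              // /my/working/dir
            System.out.println(file.isSubDirectoryOf(dir)); // true

            List<String> parts = file.components();         // [my, working, dir, file.txt]
            System.out.println(parts);
        }
    }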
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathAlreadyExistsException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathAlreadyExistsException.java
deleted file mode 100644
index e0a408a..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathAlreadyExistsException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Exception thrown when a target path that is about to be created already exists.
- */
-public class IgfsPathAlreadyExistsException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsPathAlreadyExistsException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsPathAlreadyExistsException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg   Message.
-     * @param cause Cause.
-     */
-    public IgfsPathAlreadyExistsException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathIsDirectoryException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathIsDirectoryException.java
deleted file mode 100644
index 760f582..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathIsDirectoryException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Exception indicating that a path is a directory while it was expected to be a file.
- */
-public class IgfsPathIsDirectoryException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsPathIsDirectoryException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsPathIsDirectoryException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg   Message.
-     * @param cause Cause.
-     */
-    public IgfsPathIsDirectoryException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathIsNotDirectoryException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathIsNotDirectoryException.java
deleted file mode 100644
index 50e9eca..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathIsNotDirectoryException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Exception indicating that a path is not a directory.
- */
-public class IgfsPathIsNotDirectoryException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsPathIsNotDirectoryException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsPathIsNotDirectoryException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg   Message.
-     * @param cause Cause.
-     */
-    public IgfsPathIsNotDirectoryException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathNotFoundException.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathNotFoundException.java
deleted file mode 100644
index 1351031..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathNotFoundException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.jetbrains.annotations.Nullable;
-
-/**
- * {@code IGFS} exception indicating that the target resource is not found.
- */
-public class IgfsPathNotFoundException extends IgfsException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Constructor.
-     *
-     * @param msg Message.
-     */
-    public IgfsPathNotFoundException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cause Cause.
-     */
-    public IgfsPathNotFoundException(Throwable cause) {
-        super(cause);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param msg   Message.
-     * @param cause Cause.
-     */
-    public IgfsPathNotFoundException(@Nullable String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathSummary.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathSummary.java
deleted file mode 100644
index 33f9ec0..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsPathSummary.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-import org.apache.ignite.binary.BinaryObjectException;
-import org.apache.ignite.binary.BinaryRawReader;
-import org.apache.ignite.binary.BinaryRawWriter;
-import org.apache.ignite.binary.BinaryReader;
-import org.apache.ignite.binary.BinaryWriter;
-import org.apache.ignite.binary.Binarylizable;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Path summary: total files count, total directories count, total length.
- */
-public class IgfsPathSummary implements Externalizable, Binarylizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Path. */
-    private IgfsPath path;
-
-    /** File count. */
-    private int filesCnt;
-
-    /** Directories count. */
-    private int dirCnt;
-
-    /** Length consumed. */
-    private long totalLen;
-
-    /**
-     * Empty constructor required by {@link Externalizable}.
-     */
-    public IgfsPathSummary() {
-        // No-op.
-    }
-
-    /**
-     * Construct empty path summary.
-     *
-     * @param path Path.
-     */
-    public IgfsPathSummary(IgfsPath path) {
-        this.path = path;
-    }
-
-    /**
-     * @return Files count.
-     */
-    public int filesCount() {
-        return filesCnt;
-    }
-
-    /**
-     * @param filesCnt Files count.
-     */
-    public void filesCount(int filesCnt) {
-        this.filesCnt = filesCnt;
-    }
-
-    /**
-     * @return Directories count.
-     */
-    public int directoriesCount() {
-        return dirCnt;
-    }
-
-    /**
-     * @param dirCnt Directories count.
-     */
-    public void directoriesCount(int dirCnt) {
-        this.dirCnt = dirCnt;
-    }
-
-    /**
-     * @return Total length.
-     */
-    public long totalLength() {
-        return totalLen;
-    }
-
-    /**
-     * @param totalLen Total length.
-     */
-    public void totalLength(long totalLen) {
-        this.totalLen = totalLen;
-    }
-
-    /**
-     * @return Path for which summary is obtained.
-     */
-    public IgfsPath path() {
-        return path;
-    }
-
-    /**
-     * @param path Path for which summary is obtained.
-     */
-    public void path(IgfsPath path) {
-        this.path = path;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeInt(filesCnt);
-        out.writeInt(dirCnt);
-        out.writeLong(totalLen);
-
-        path.writeExternal(out);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        filesCnt = in.readInt();
-        dirCnt = in.readInt();
-        totalLen = in.readLong();
-
-        path = IgfsUtils.readPath(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriter writer) throws BinaryObjectException {
-        BinaryRawWriter rawWriter = writer.rawWriter();
-
-        rawWriter.writeInt(filesCnt);
-        rawWriter.writeInt(dirCnt);
-        rawWriter.writeLong(totalLen);
-
-        IgfsUtils.writePath(rawWriter, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReader reader) throws BinaryObjectException {
-        BinaryRawReader rawReader = reader.rawReader();
-
-        filesCnt = rawReader.readInt();
-        dirCnt = rawReader.readInt();
-        totalLen = rawReader.readLong();
-
-        path = IgfsUtils.readPath(rawReader);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsPathSummary.class, this);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java
deleted file mode 100644
index 63a64fc..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsUserContext.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.util.concurrent.Callable;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.lang.IgniteOutClosure;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Provides the ability to execute IGFS code in the context of a specific user.
- */
-public abstract class IgfsUserContext {
-    /** Thread local to hold the current user context. */
-    private static final ThreadLocal<String> userStackThreadLocal = new ThreadLocal<>();
-
-    /**
-     * Executes given callable in the given user context.
-     * The main contract of this method is that {@link #currentUser()}, when invoked
-     * inside the closure, always returns the 'user' this closure was executed with.
-     * @param user the user name to invoke the closure on behalf of.
-     * @param c the closure to execute.
-     * @param <T> The type of closure result.
-     * @return the result of closure execution.
-     * @throws IllegalArgumentException if user name is null or empty String or if the closure is null.
-     */
-    public static <T> T doAs(String user, final IgniteOutClosure<T> c) {
-        if (F.isEmpty(user))
-            throw new IllegalArgumentException("Failed to use null or empty user name.");
-
-        final String ctxUser = userStackThreadLocal.get();
-
-        if (F.eq(ctxUser, user))
-            return c.apply(); // correct context is already there
-
-        userStackThreadLocal.set(user);
-
-        try {
-            return c.apply();
-        }
-        finally {
-            userStackThreadLocal.set(ctxUser);
-        }
-    }
-
-    /**
-     * Same contract as {@link #doAs(String, IgniteOutClosure)}, but accepts a
-     * callable that throws a checked exception.
-     * The exception is never wrapped in any way.
-     * If your Callable throws specific checked exceptions, the recommended usage pattern is:
-     * <pre name="code" class="java">
-     *  public Foo myOperation() throws MyCheckedException1, MyCheckedException2 {
-     *      try {
-     *          return IgfsUserContext.doAs(user, new Callable<Foo>() {
-     *              &#64;Override public Foo call() throws MyCheckedException1, MyCheckedException2 {
-     *                  return makeSomeFoo(); // do the job
-     *              }
-     *          });
-     *      }
-     *      catch (MyCheckedException1 | MyCheckedException2 | RuntimeException | Error e) {
-     *          throw e;
-     *      }
-     *      catch (Exception e) {
-     *          throw new AssertionError("Must never go there.");
-     *      }
-     *  }
-     * </pre>
-     * @param user the user name to invoke the callable on behalf of.
-     * @param c the Callable to execute.
-     * @param <T> The type of callable result.
-     * @return the result of closure execution.
-     * @throws IllegalArgumentException if user name is null or empty String or if the closure is null.
-     */
-    public static <T> T doAs(String user, final Callable<T> c) throws Exception {
-        if (F.isEmpty(user))
-            throw new IllegalArgumentException("Failed to use null or empty user name.");
-
-        final String ctxUser = userStackThreadLocal.get();
-
-        if (F.eq(ctxUser, user))
-            return c.call(); // correct context is already there
-
-        userStackThreadLocal.set(user);
-
-        try {
-            return c.call();
-        }
-        finally {
-            userStackThreadLocal.set(ctxUser);
-        }
-    }
-
-    /**
-     * Gets the current context user.
-     * If this method is invoked outside of any {@link #doAs(String, IgniteOutClosure)} on the call stack, it
-     * returns null. Otherwise it returns the user name set by the innermost
-     * {@link #doAs(String, IgniteOutClosure)} call on the call stack.
-     * @return The current user, may be null.
-     */
-    @Nullable public static String currentUser() {
-        return userStackThreadLocal.get();
-    }
-}
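
For reference, a minimal hedged sketch of how the removed IgfsUserContext API was typically used; the class and variable names below are illustrative only and do not come from the Ignite codebase:

    import org.apache.ignite.igfs.IgfsUserContext;
    import org.apache.ignite.lang.IgniteOutClosure;

    /** Illustrative sketch only; the IgfsUserContext API is removed by this commit. */
    public class IgfsUserContextSketch {
        public static void main(String[] args) {
            // Run a block of IGFS-related code on behalf of user "alice".
            String effectiveUser = IgfsUserContext.doAs("alice", new IgniteOutClosure<String>() {
                @Override public String apply() {
                    // Inside the closure, currentUser() returns the user passed to doAs().
                    return IgfsUserContext.currentUser();
                }
            });

            assert "alice".equals(effectiveUser);

            // Outside of any doAs() call, currentUser() returns null again.
            assert IgfsUserContext.currentUser() == null;
        }
    }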
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsFileRange.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsFileRange.java
deleted file mode 100644
index 533bc60..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsFileRange.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Entity representing part of IGFS file identified by file path, start position, and length.
- */
-public class IgfsFileRange {
-    /** File path. */
-    private IgfsPath path;
-
-    /** Start position. */
-    private long start;
-
-    /** Length. */
-    private long len;
-
-    /**
-     * Creates file range.
-     *
-     * @param path File path.
-     * @param start Start position.
-     * @param len Length.
-     */
-    public IgfsFileRange(IgfsPath path, long start, long len) {
-        this.path = path;
-        this.start = start;
-        this.len = len;
-    }
-
-    /**
-     * Gets file path.
-     *
-     * @return File path.
-     */
-    public IgfsPath path() {
-        return path;
-    }
-
-    /**
-     * Gets range start position.
-     *
-     * @return Start position.
-     */
-    public long start() {
-        return start;
-    }
-
-    /**
-     * Gets range length.
-     *
-     * @return Length.
-     */
-    public long length() {
-        return len;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsFileRange.class, this);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsInputStreamJobAdapter.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsInputStreamJobAdapter.java
deleted file mode 100644
index 01cfc5a..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsInputStreamJobAdapter.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import java.io.IOException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.internal.util.GridFixedSizeInputStream;
-
-/**
- * Convenient {@link IgfsJob} adapter. It limits data returned from {@link IgfsInputStream} to bytes within
- * the {@link IgfsFileRange} assigned to the job.
- * <p>
- * Under the covers it simply sets the job's {@code IgfsInputStream} position to the range start and wraps it into
- * {@link GridFixedSizeInputStream} limited to range length.
- */
-public abstract class IgfsInputStreamJobAdapter extends IgfsJobAdapter {
-    /** {@inheritDoc} */
-    @Override public final Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in)
-        throws IgniteException, IOException {
-        in.seek(range.start());
-
-        return execute(igfs, new IgfsRangeInputStream(in, range));
-    }
-
-    /**
-     * Executes this job.
-     *
-     * @param igfs IGFS instance.
-     * @param in Input stream.
-     * @return Execution result.
-     * @throws IgniteException If execution failed.
-     * @throws IOException If IO exception encountered while working with stream.
-     */
-    public abstract Object execute(IgniteFileSystem igfs, IgfsRangeInputStream in) throws IgniteException, IOException;
-}
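
As a hedged illustration of the adapter above, a hypothetical job that counts the bytes in its assigned range might have looked like this (class name and logic are illustrative, not part of the removed code):

    import java.io.IOException;

    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.igfs.mapreduce.IgfsInputStreamJobAdapter;
    import org.apache.ignite.igfs.mapreduce.IgfsRangeInputStream;

    /** Hypothetical example: counts bytes within the range assigned to the job. */
    public class ByteCountJob extends IgfsInputStreamJobAdapter {
        /** {@inheritDoc} */
        @Override public Object execute(IgniteFileSystem igfs, IgfsRangeInputStream in) throws IOException {
            long cnt = 0;

            byte[] buf = new byte[8192];

            int read;

            // The adapter already bounded 'in' to the assigned range, so plain reads suffice.
            while ((read = in.read(buf, 0, buf.length)) != -1)
                cnt += read;

            return cnt;
        }
    }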
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJob.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJob.java
deleted file mode 100644
index 7ec1afa..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJob.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import java.io.IOException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.igfs.IgfsInputStream;
-
-/**
- * Defines executable unit for {@link IgfsTask}. Before this job is executed, it is assigned one of the
- * ranges provided by the {@link IgfsRecordResolver} passed to one of the {@code IgniteFs.execute(...)} methods.
- * <p>
- * The {@link #execute(org.apache.ignite.IgniteFileSystem, IgfsFileRange, org.apache.ignite.igfs.IgfsInputStream)} method is given the {@link IgfsFileRange} this
- * job is expected to operate on, and an already opened {@link org.apache.ignite.igfs.IgfsInputStream} for the file this range belongs to.
- * <p>
- * Note that the provided input stream has its position already adjusted to the range start. However, it will not
- * automatically stop at the range end. This is done to allow, in some cases, looking beyond
- * the range end or seeking to a position before the range start.
- * <p>
- * In the majority of cases, when you want to process only the provided range, you should explicitly control the amount
- * of returned data and stop at the range end. You can also use {@link IgfsInputStreamJobAdapter}, which operates
- * on an {@link IgfsRangeInputStream} bounded to the range start and end, or manually wrap the provided input stream with
- * {@link IgfsRangeInputStream}.
- * <p>
- * You can inject any resources in concrete implementation, just as with regular {@link org.apache.ignite.compute.ComputeJob} implementations.
- */
-public interface IgfsJob {
-    /**
-     * Executes this job.
-     *
-     * @param igfs IGFS instance.
-     * @param range File range aligned to record boundaries.
-     * @param in Input stream for split file. This input stream is not aligned to range and points to file start
-     *     by default.
-     * @return Execution result.
-     * @throws IgniteException If execution failed.
-     * @throws IOException If file system operation resulted in IO exception.
-     */
-    public Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in) throws IgniteException,
-        IOException;
-
-    /**
-     * This method is called when system detects that completion of this
-     * job can no longer alter the overall outcome (for example, when parent task
-     * has already reduced the results). Job is also cancelled when
-     * {@link org.apache.ignite.compute.ComputeTaskFuture#cancel()} is called.
-     * <p>
-     * Note that job cancellation is only a hint, and just like with
-     * {@link Thread#interrupt()}  method, it is really up to the actual job
-     * instance to gracefully finish execution and exit.
-     */
-    public void cancel();
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJobAdapter.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJobAdapter.java
deleted file mode 100644
index 35873fe..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJobAdapter.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-/**
- * Adapter for {@link IgfsJob} with no-op implementation of {@link #cancel()} method.
- */
-public abstract class IgfsJobAdapter implements IgfsJob {
-    /** {@inheritDoc} */
-    @Override public void cancel() {
-        // No-op.
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRangeInputStream.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRangeInputStream.java
deleted file mode 100644
index 449b87d..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRangeInputStream.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import java.io.EOFException;
-import java.io.IOException;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.NotNull;
-
-/**
- * Decorator for regular {@link org.apache.ignite.igfs.IgfsInputStream} which streams only data within the given range.
- * This stream is used by the {@link IgfsInputStreamJobAdapter} convenience adapter to create
- * jobs which work only with the assigned range. You can also use it explicitly when
- * working with {@link IgfsJob} directly.
- */
-public final class IgfsRangeInputStream extends IgfsInputStream {
-    /** Base input stream. */
-    private final IgfsInputStream is;
-
-    /** Start position. */
-    private final long start;
-
-    /** Maximum stream length. */
-    private final long maxLen;
-
-    /** Current position within the stream. */
-    private long pos;
-
-    /**
-     * Constructor.
-     *
-     * @param is Base input stream.
-     * @param start Start position.
-     * @param maxLen Maximum stream length.
-     * @throws IOException In case of exception.
-     */
-    public IgfsRangeInputStream(IgfsInputStream is, long start, long maxLen) throws IOException {
-        if (is == null)
-            throw new IllegalArgumentException("Input stream cannot be null.");
-
-        if (start < 0)
-            throw new IllegalArgumentException("Start position cannot be negative.");
-
-        if (start >= is.length())
-            throw new IllegalArgumentException("Start position cannot be greater that file length.");
-
-        if (maxLen < 0)
-            throw new IllegalArgumentException("Length cannot be negative.");
-
-        if (start + maxLen > is.length())
-            throw new IllegalArgumentException("Sum of start position and length cannot be greater than file length.");
-
-        this.is = is;
-        this.start = start;
-        this.maxLen = maxLen;
-
-        is.seek(start);
-    }
-
-    /** {@inheritDoc} */
-    @Override public long length() {
-        return is.length();
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param is Base input stream.
-     * @param range File range.
-     * @throws IOException In case of exception.
-     */
-    public IgfsRangeInputStream(IgfsInputStream is, IgfsFileRange range) throws IOException {
-        this(is, range.start(), range.length());
-    }
-
-    /** {@inheritDoc} */
-    @Override public int read() throws IOException {
-        if (pos < maxLen) {
-            int res = is.read();
-
-            if (res != -1)
-                pos++;
-
-            return res;
-        }
-        else
-            return -1;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int read(@NotNull byte[] b, int off, int len) throws IOException {
-        if (pos < maxLen) {
-            len = (int)Math.min(len, maxLen - pos);
-
-            int res = is.read(b, off, len);
-
-            if (res != -1)
-                pos += res;
-
-            return res;
-        }
-        else
-            return -1;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int read(long pos, byte[] buf, int off, int len) throws IOException {
-        seek(pos);
-
-        return read(buf, off, len);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFully(long pos, byte[] buf) throws IOException {
-        readFully(pos, buf, 0, buf.length);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFully(long pos, byte[] buf, int off, int len) throws IOException {
-        seek(pos);
-
-        for (int readBytes = 0; readBytes < len;) {
-            int read = read(buf, off + readBytes, len - readBytes);
-
-            if (read == -1)
-                throw new EOFException("Failed to read stream fully (stream ends unexpectedly) [pos=" + pos +
-                    ", buf.length=" + buf.length + ", off=" + off + ", len=" + len + ']');
-
-            readBytes += read;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void seek(long pos) throws IOException {
-        if (pos < 0)
-            throw new IOException("Seek position cannot be negative: " + pos);
-
-        is.seek(start + pos);
-
-        this.pos = pos;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long position() {
-        return pos;
-    }
-
-    /**
-     * Since a range input stream represents a part of a larger file stream, there is an offset at which this
-     * range input stream starts in the original input stream. This method returns the start offset of this input
-     * stream relative to the original input stream.
-     *
-     * @return Start offset in original input stream.
-     */
-    public long startOffset() {
-        return start;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int available() {
-        long l = maxLen - pos;
-
-        if (l < 0)
-            return 0;
-
-        if (l > Integer.MAX_VALUE)
-            return Integer.MAX_VALUE;
-
-        return (int)l;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() throws IOException {
-        is.close();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsRangeInputStream.class, this);
-    }
-}
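
The IgfsJob documentation above notes that the provided stream can also be wrapped manually; a hedged sketch of that pattern follows (the job class and its names are hypothetical, not removed code):

    import java.io.IOException;

    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.igfs.IgfsInputStream;
    import org.apache.ignite.igfs.mapreduce.IgfsFileRange;
    import org.apache.ignite.igfs.mapreduce.IgfsJob;
    import org.apache.ignite.igfs.mapreduce.IgfsRangeInputStream;

    /** Hypothetical IgfsJob that wraps the raw stream itself instead of using the adapter. */
    public class FirstByteJob implements IgfsJob {
        /** {@inheritDoc} */
        @Override public Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in)
            throws IOException {
            // Bound the raw stream to the assigned range before reading from it.
            IgfsRangeInputStream rangeIn = new IgfsRangeInputStream(in, range);

            return rangeIn.read(); // First byte of the range, or -1 if the range is empty.
        }

        /** {@inheritDoc} */
        @Override public void cancel() {
            // No-op.
        }
    }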
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRecordResolver.java
deleted file mode 100644
index 5b25299..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRecordResolver.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import java.io.IOException;
-import java.io.Serializable;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * IGFS record resolver. When an {@link IgfsTask} is split into {@link IgfsJob}s, each produced job obtains an
- * {@link IgfsFileRange} based on file data location. The record resolver is invoked in each job before actual
- * execution in order to adjust record boundaries in a way consistent with user data.
- * <p>
- * E.g., you may want to split your task into jobs so that each job processes zero, one or several lines from the file.
- * But the file is split into ranges based on block locations, not newline boundaries. Using a convenient record resolver
- * you can adjust the job range so that it covers whole lines.
- * <p>
- * The following record resolvers are available out of the box:
- * <ul>
- *     <li>{@link org.apache.ignite.igfs.mapreduce.records.IgfsFixedLengthRecordResolver}</li>
- *     <li>{@link org.apache.ignite.igfs.mapreduce.records.IgfsByteDelimiterRecordResolver}</li>
- *     <li>{@link org.apache.ignite.igfs.mapreduce.records.IgfsStringDelimiterRecordResolver}</li>
- *     <li>{@link org.apache.ignite.igfs.mapreduce.records.IgfsNewLineRecordResolver}</li>
- * </ul>
- */
-public interface IgfsRecordResolver extends Serializable {
-    /**
-     * Adjusts record start offset and length.
-     *
-     * @param fs IGFS instance to use.
-     * @param stream Input stream for split file.
-     * @param suggestedRecord Suggested file system record.
-     * @return New adjusted record. If this method returns {@code null}, original record is ignored.
-     * @throws IgniteException If resolve failed.
-     * @throws IOException If resolve failed.
-     */
-    @Nullable public IgfsFileRange resolveRecords(IgniteFileSystem fs, IgfsInputStream stream,
-        IgfsFileRange suggestedRecord) throws IgniteException, IOException;
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
deleted file mode 100644
index d65338b..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.compute.ComputeJob;
-import org.apache.ignite.compute.ComputeTaskAdapter;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.IgniteKernal;
-import org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.resources.IgniteInstanceResource;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * IGFS task which can be executed on the grid using one of the {@code IgniteFs.execute()} methods. Essentially an IGFS task
- * is a regular {@link org.apache.ignite.compute.ComputeTask} with different map logic. Instead of implementing
- * {@link org.apache.ignite.compute.ComputeTask#map(List, Object)} method to split task into jobs, you must implement
- * {@link IgfsTask#createJob(org.apache.ignite.igfs.IgfsPath, IgfsFileRange, IgfsTaskArgs)} method.
- * <p>
- * Each file participating in an IGFS task is first split into {@link IgfsFileRange}s. Normally a range is a number of
- * consecutive bytes located on a single node (see {@code IgfsGroupDataBlocksKeyMapper}). In case a maximum range size
- * is provided (either through {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} or an {@code IgniteFs.execute()}
- * argument), ranges can be further divided into smaller chunks.
- * <p>
- * Once file is split into ranges, each range is passed to {@code IgfsTask.createJob()} method in order to create a
- * {@link IgfsJob}.
- * <p>
- * Finally all generated jobs are sent to Grid nodes for execution.
- * <p>
- * As with regular {@code ComputeTask} you can define your own logic for results handling and reduce step.
- * <p>
- * Here is an example of such a task:
- * <pre name="code" class="java">
- * public class WordCountTask extends IgfsTask&lt;String, Integer&gt; {
- *     &#64;Override
- *     public IgfsJob createJob(IgfsPath path, IgfsFileRange range, IgfsTaskArgs&lt;T&gt; args) throws IgniteCheckedException {
- *         // New job will be created for each range within each file.
- *         // We pass user-provided argument (which is essentially a word to look for) to that job.
- *         return new WordCountJob(args.userArgument());
- *     }
- *
- *     // Aggregate results into one compound result.
- *     public Integer reduce(List&lt;ComputeJobResult&gt; results) throws IgniteCheckedException {
- *         Integer total = 0;
- *
- *         for (ComputeJobResult res : results) {
- *             Integer cnt = res.getData();
- *
- *             // Null can be returned for non-existent file in case we decide to ignore such situations.
- *             if (cnt != null)
- *                 total += cnt;
- *         }
- *
- *         return total;
- *     }
- * }
- * </pre>
- */
-public abstract class IgfsTask<T, R> extends ComputeTaskAdapter<IgfsTaskArgs<T>, R> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Injected grid. */
-    @IgniteInstanceResource
-    private Ignite ignite;
-
-    /** {@inheritDoc} */
-    @Nullable @Override public final Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid,
-        @Nullable IgfsTaskArgs<T> args) {
-        assert ignite != null;
-        assert args != null;
-
-        IgniteFileSystem fs = ignite.fileSystem(args.igfsName());
-        IgfsProcessorAdapter igfsProc = ((IgniteKernal) ignite).context().igfs();
-
-        Map<ComputeJob, ClusterNode> splitMap = new HashMap<>();
-
-        Map<UUID, ClusterNode> nodes = mapSubgrid(subgrid);
-
-        for (IgfsPath path : args.paths()) {
-            IgfsFile file = fs.info(path);
-
-            if (file == null) {
-                if (args.skipNonExistentFiles())
-                    continue;
-                else
-                    throw new IgniteException("Failed to process IGFS file because it doesn't exist: " + path);
-            }
-
-            Collection<IgfsBlockLocation> aff = fs.affinity(path, 0, file.length(), args.maxRangeLength());
-
-            long totalLen = 0;
-
-            for (IgfsBlockLocation loc : aff) {
-                ClusterNode node = null;
-
-                for (UUID nodeId : loc.nodeIds()) {
-                    node = nodes.get(nodeId);
-
-                    if (node != null)
-                        break;
-                }
-
-                if (node == null)
-                    throw new IgniteException("Failed to find any of block affinity nodes in subgrid [loc=" + loc +
-                        ", subgrid=" + subgrid + ']');
-
-                IgfsJob job = createJob(path, new IgfsFileRange(file.path(), loc.start(), loc.length()), args);
-
-                if (job != null) {
-                    ComputeJob jobImpl = igfsProc.createJob(job, fs.name(), file.path(), loc.start(),
-                        loc.length(), args.recordResolver());
-
-                    splitMap.put(jobImpl, node);
-                }
-
-                totalLen += loc.length();
-            }
-
-            assert totalLen == file.length();
-        }
-
-        return splitMap;
-    }
-
-    /**
-     * Callback invoked during the task map procedure to create a job that will process the specified split
-     * of an IGFS file.
-     *
-     * @param path Path.
-     * @param range File range based on consecutive blocks. This range will be further
-     *      realigned to record boundaries on destination node.
-     * @param args Task argument.
-     * @return IGFS job. If {@code null} is returned, the passed in file range will be skipped.
-     * @throws IgniteException If job creation failed.
-     */
-    @Nullable public abstract IgfsJob createJob(IgfsPath path, IgfsFileRange range,
-        IgfsTaskArgs<T> args) throws IgniteException;
-
-    /**
-     * Maps list by node ID.
-     *
-     * @param subgrid Subgrid.
-     * @return Map.
-     */
-    private Map<UUID, ClusterNode> mapSubgrid(Collection<ClusterNode> subgrid) {
-        Map<UUID, ClusterNode> res = U.newHashMap(subgrid.size());
-
-        for (ClusterNode node : subgrid)
-            res.put(node.id(), node);
-
-        return res;
-    }
-}
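
Tying the pieces together, a hedged sketch of a task that pairs with the hypothetical ByteCountJob shown earlier; everything below is illustrative rather than code removed by this commit:

    import java.util.List;

    import org.apache.ignite.compute.ComputeJobResult;
    import org.apache.ignite.igfs.IgfsPath;
    import org.apache.ignite.igfs.mapreduce.IgfsFileRange;
    import org.apache.ignite.igfs.mapreduce.IgfsJob;
    import org.apache.ignite.igfs.mapreduce.IgfsTask;
    import org.apache.ignite.igfs.mapreduce.IgfsTaskArgs;

    /** Hypothetical task: sums the per-range byte counts produced by ByteCountJob. */
    public class ByteCountTask extends IgfsTask<Object, Long> {
        /** */
        private static final long serialVersionUID = 0L;

        /** {@inheritDoc} */
        @Override public IgfsJob createJob(IgfsPath path, IgfsFileRange range, IgfsTaskArgs<Object> args) {
            return new ByteCountJob(); // One job per file range.
        }

        /** {@inheritDoc} */
        @Override public Long reduce(List<ComputeJobResult> results) {
            long total = 0;

            for (ComputeJobResult res : results)
                total += res.<Long>getData();

            return total;
        }
    }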
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
deleted file mode 100644
index e8b0684..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import java.util.Collection;
-import org.apache.ignite.igfs.IgfsPath;
-
-/**
- * IGFS task arguments. When you initiate a new IGFS task execution using one of the {@code IgniteFs.execute(...)} methods,
- * all passed parameters are encapsulated in a single {@code IgfsTaskArgs} object. Later on this object is
- * passed to {@link IgfsTask#createJob(org.apache.ignite.igfs.IgfsPath, IgfsFileRange, IgfsTaskArgs)} method.
- * <p>
- * Task arguments encapsulate the following data:
- * <ul>
- *     <li>IGFS name</li>
- *     <li>File paths passed to {@code IgniteFs.execute()} method</li>
- *     <li>{@link IgfsRecordResolver} for that task</li>
- *     <li>Flag indicating whether to skip non-existent file paths or throw an exception</li>
- *     <li>User-defined task argument</li>
- *     <li>Maximum file range length for that task (see {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()})</li>
- * </ul>
- */
-public interface IgfsTaskArgs<T> {
-    /**
-     * Gets IGFS name.
-     *
-     * @return IGFS name.
-     */
-    public String igfsName();
-
-    /**
-     * Gets file paths to process.
-     *
-     * @return File paths to process.
-     */
-    public Collection<IgfsPath> paths();
-
-    /**
-     * Gets record resolver for the task.
-     *
-     * @return Record resolver.
-     */
-    public IgfsRecordResolver recordResolver();
-
-    /**
-     * Flag indicating whether to fail or simply skip non-existent files.
-     *
-     * @return {@code True} if non-existent files should be skipped.
-     */
-    public boolean skipNonExistentFiles();
-
-    /**
-     * User argument provided for task execution.
-     *
-     * @return User argument.
-     */
-    public T userArgument();
-
-    /**
-     * Optional maximum allowed range length, {@code 0} by default. If not specified, full range including
-     * all consecutive blocks will be used without any limitations.
-     *
-     * @return Maximum range length.
-     */
-    public long maxRangeLength();
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskNoReduceAdapter.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskNoReduceAdapter.java
deleted file mode 100644
index 450a7c3..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskNoReduceAdapter.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce;
-
-import java.util.List;
-import org.apache.ignite.compute.ComputeJobResult;
-
-/**
- * Convenient {@link IgfsTask} adapter with empty reduce step. Use this adapter in case you are not interested in
- * results returned by jobs.
- */
-public abstract class IgfsTaskNoReduceAdapter<T, R> extends IgfsTask<T, R> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Default implementation which will ignore all results sent from execution nodes.
-     *
-     * @param results Received results of broadcasted remote executions. Note that if task class has
-     *      {@link org.apache.ignite.compute.ComputeTaskNoResultCache} annotation, then this list will be empty.
-     * @return Will always return {@code null}.
-     */
-    @Override public R reduce(List<ComputeJobResult> results) {
-        return null;
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/package-info.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/package-info.java
deleted file mode 100644
index e9f98d4..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Contains APIs for In-Memory MapReduce over IGFS.
- */
-
-package org.apache.ignite.igfs.mapreduce;
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsByteDelimiterRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsByteDelimiterRecordResolver.java
deleted file mode 100644
index 4f5d205..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsByteDelimiterRecordResolver.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce.records;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.Map;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.igfs.mapreduce.IgfsFileRange;
-import org.apache.ignite.igfs.mapreduce.IgfsRecordResolver;
-import org.apache.ignite.internal.util.tostring.GridToStringExclude;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Record resolver which adjusts records based on provided delimiters. Both start position and length are
- * shifted to the right, based on delimiter positions.
- * <p>
- * Note that you can use {@link IgfsStringDelimiterRecordResolver} if your delimiter is a plain string.
- */
-public class IgfsByteDelimiterRecordResolver implements IgfsRecordResolver, Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Delimiters. */
-    private byte[][] delims;
-
-    /** Maximum delimiter length. */
-    @GridToStringExclude
-    private int maxDelimLen;
-
-    /**
-     * Empty constructor required for {@link Externalizable} support.
-     */
-    public IgfsByteDelimiterRecordResolver() {
-        // No-op.
-    }
-
-    /**
-     * Creates delimiter-based record resolver.
-     *
-     * @param delims Delimiters.
-     */
-    public IgfsByteDelimiterRecordResolver(byte[]... delims) {
-        if (delims == null || delims.length == 0)
-            throw new IllegalArgumentException("Delimiters cannot be null or empty.");
-
-        this.delims = delims;
-
-        int maxDelimLen = 0;
-
-        for (byte[] delim : delims) {
-            if (delim == null)
-                throw new IllegalArgumentException("Delimiter cannot be null.");
-            else if (maxDelimLen < delim.length)
-                maxDelimLen = delim.length;
-        }
-
-        this.maxDelimLen = maxDelimLen;
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFileRange resolveRecords(IgniteFileSystem fs, IgfsInputStream stream,
-        IgfsFileRange suggestedRecord) throws IgniteException, IOException {
-        long suggestedStart = suggestedRecord.start();
-        long suggestedEnd = suggestedStart + suggestedRecord.length();
-
-        IgniteBiTuple<State, Delimiter> firstDelim = findFirstDelimiter(stream, suggestedStart);
-
-        State state = firstDelim != null ? firstDelim.getKey() : new State();
-
-        Delimiter curDelim = firstDelim.getValue();
-
-        while (curDelim != null && curDelim.end < suggestedStart)
-            curDelim = nextDelimiter(stream, state);
-
-        if (curDelim != null && (curDelim.end >= suggestedStart && curDelim.end < suggestedEnd) ||
-            suggestedStart == 0 ) {
-            // We found start delimiter.
-            long start = suggestedStart == 0 ? 0 : curDelim.end;
-
-            if (curDelim == null || curDelim.end < suggestedEnd) {
-                IgniteBiTuple<State, Delimiter> lastDelim = findFirstDelimiter(stream, suggestedEnd);
-
-                state = lastDelim != null ? firstDelim.getKey() : new State();
-
-                curDelim = lastDelim.getValue();
-
-                while (curDelim != null && curDelim.end < suggestedEnd)
-                    curDelim = nextDelimiter(stream, state);
-            }
-
-            long end = curDelim != null ? curDelim.end : stream.position();
-
-            return new IgfsFileRange(suggestedRecord.path(), start, end - start);
-        }
-        else
-            // We failed to find any delimiters up to the EOS.
-            return null;
-    }
-
-    /**
-     * Calculate maximum delimiters length.
-     *
-     * @param delims Delimiters.
-     * @return Maximum delimiter length.
-     */
-    private int maxDelimiterLength(byte[][] delims) {
-        int maxDelimLen = 0;
-
-        for (byte[] delim : delims) {
-            if (delim == null)
-                throw new IllegalArgumentException("Delimiter cannot be null.");
-            else if (maxDelimLen < delim.length)
-                maxDelimLen = delim.length;
-        }
-
-        return maxDelimLen;
-    }
-
-    /**
-     * Find the first delimiter. In order to achieve this we have to rewind the stream until we find a delimiter
-     * which starts at least [maxDelimLen] bytes before the search start position, or until we reach the stream start.
-     * Otherwise we cannot be sure that the delimiter position is determined correctly.
-     *
-     * @param stream IGFS input stream.
-     * @param startPos Start search position.
-     * @return The first found delimiter.
-     * @throws IOException In case of IO exception.
-     */
-    @Nullable private IgniteBiTuple<State, Delimiter> findFirstDelimiter(IgfsInputStream stream, long startPos)
-        throws IOException {
-        State state;
-        Delimiter delim;
-
-        long curPos = Math.max(0, startPos - maxDelimLen);
-
-        while (true) {
-            stream.seek(curPos);
-
-            state = new State();
-
-            delim = nextDelimiter(stream, state);
-
-            if (curPos == 0 || delim == null || delim.start - curPos > maxDelimLen - 1)
-                break;
-            else
-                curPos = Math.max(0, curPos - maxDelimLen);
-        }
-
-        return F.t(state, delim);
-    }
-
-    /**
-     * Resolve next delimiter.
-     *
-     * @param is IGFS input stream.
-     * @param state Current state.
-     * @return Next delimiter and updated map.
-     * @throws IOException In case of exception.
-     */
-    private Delimiter nextDelimiter(IgfsInputStream is, State state) throws IOException {
-        assert is != null;
-        assert state != null;
-
-        Map<Integer, Integer> parts = state.parts;
-        LinkedList<Delimiter> delimQueue = state.delims;
-
-        int nextByte = is.read();
-
-        while (nextByte != -1) {
-            // Process read byte.
-            for (int idx = 0; idx < delims.length; idx++) {
-                byte[] delim = delims[idx];
-
-                int val = parts.containsKey(idx) ? parts.get(idx) : 0;
-
-                if (delim[val] == nextByte) {
-                    if (val == delim.length - 1) {
-                        // Full delimiter is found.
-                        parts.remove(idx);
-
-                        Delimiter newDelim = new Delimiter(is.position() - delim.length, is.position());
-
-                        // Read queue from the end looking for the "inner" delimiters.
-                        boolean ignore = false;
-
-                        int replaceIdx = -1;
-
-                        for (int i = delimQueue.size() - 1; i >= 0; i--) {
-                            Delimiter prevDelim = delimQueue.get(i);
-
-                            if (prevDelim.start < newDelim.start) {
-                                if (prevDelim.end > newDelim.start) {
-                                    // Ignore this delimiter.
-                                    ignore = true;
-
-                                    break;
-                                }
-                            }
-                            else if (prevDelim.start == newDelim.start) {
-                                // Ok, we found matching delimiter.
-                                replaceIdx = i;
-
-                                break;
-                            }
-                        }
-
-                        if (!ignore) {
-                            if (replaceIdx >= 0)
-                                delimQueue.removeAll(delimQueue.subList(replaceIdx, delimQueue.size()));
-
-                            delimQueue.add(newDelim);
-                        }
-                    }
-                    else
-                        parts.put(idx, ++val);
-                }
-                else if (val != 0) {
-                    if (delim[0] == nextByte) {
-                        boolean shift = true;
-
-                        for (int k = 1; k < val; k++) {
-                            if (delim[k] != nextByte) {
-                                shift = false;
-
-                                break;
-                            }
-                        }
-
-                        if (!shift)
-                            parts.put(idx, 1);
-                    }
-                    else
-                        // Delimiter sequence is totally broken.
-                        parts.remove(idx);
-                }
-            }
-
-            // Check whether we can be sure that the first delimiter will not change.
-            if (!delimQueue.isEmpty()) {
-                Delimiter delim = delimQueue.get(0);
-
-                if (is.position() - delim.end >= maxDelimLen)
-                    return delimQueue.poll();
-            }
-
-            nextByte = is.read();
-        }
-
-        return delimQueue.poll();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsByteDelimiterRecordResolver.class, this);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        if (delims != null) {
-            out.writeBoolean(true);
-
-            out.writeInt(delims.length);
-
-            for (byte[] delim : delims)
-                U.writeByteArray(out, delim);
-        }
-        else
-            out.writeBoolean(false);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        if (in.readBoolean()) {
-            int len = in.readInt();
-
-            delims = new byte[len][];
-
-            for (int i = 0; i < len; i++)
-                delims[i] = U.readByteArray(in);
-
-            maxDelimLen = maxDelimiterLength(delims);
-        }
-    }
-
-    /**
-     * Delimiter descriptor.
-     */
-    private static class Delimiter {
-        /** Delimiter start position. */
-        private final long start;
-
-        /** Delimiter end position. */
-        private final long end;
-
-        /**
-         * Constructor.
-         *
-         * @param start Delimiter start position.
-         * @param end Delimiter end position.
-         */
-        private Delimiter(long start, long end) {
-            assert start >= 0 && end >= 0 && start <= end;
-
-            this.start = start;
-            this.end = end;
-        }
-    }
-
-    /**
-     * Current resolution state.
-     */
-    private static class State {
-        /** Partially resolved delimiters. */
-        private final Map<Integer, Integer> parts;
-
-        /** Resolved delimiters which could potentially be merged. */
-        private final LinkedList<Delimiter> delims;
-
-        /**
-         * Constructor.
-         */
-        private State() {
-            parts = new HashMap<>();
-
-            delims = new LinkedList<>();
-        }
-    }
-}
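
The nextDelimiter() loop above keeps a per-delimiter partial-match counter, drops candidate delimiters that fall inside one already found, and only emits the head of the queue once the stream has advanced maxDelimLen bytes past it, so an overlapping shorter delimiter (a bare LF inside CRLF, for instance) never splits a record. Below is a much simplified standalone sketch of that matching rule over an in-memory byte array; the class and helper names are hypothetical, and streaming, state carry-over between blocks and the queue logic are deliberately left out:

import java.util.Arrays;

/** Standalone sketch: find the first delimiter occurrence, preferring the longest match at a position. */
final class DelimiterScanSketch {
    /** Returns {start, end} of the first delimiter found in data, or null if none. */
    static long[] findFirst(byte[] data, byte[][] delims) {
        for (int pos = 0; pos < data.length; pos++) {
            long[] best = null;

            for (byte[] delim : delims) {
                if (pos + delim.length <= data.length &&
                    Arrays.equals(Arrays.copyOfRange(data, pos, pos + delim.length), delim)) {
                    if (best == null || delim.length > best[1] - best[0])
                        best = new long[] {pos, pos + delim.length};
                }
            }

            if (best != null)
                return best; // Longest delimiter starting at the earliest position wins.
        }

        return null;
    }

    public static void main(String[] args) {
        byte[][] delims = {"\r\n".getBytes(), "\n".getBytes()};

        long[] hit = findFirst("a\r\nb".getBytes(), delims);

        System.out.println(Arrays.toString(hit)); // Prints [1, 3]: "\r\n" wins over the inner "\n".
    }
}
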
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsFixedLengthRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsFixedLengthRecordResolver.java
deleted file mode 100644
index f6d5e19..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsFixedLengthRecordResolver.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce.records;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.igfs.mapreduce.IgfsFileRange;
-import org.apache.ignite.igfs.mapreduce.IgfsRecordResolver;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Record resolver which adjusts records to a fixed length. That is, the start offset of the record is shifted to
- * the nearest position such that {@code newStart % length == 0}.
- */
-public class IgfsFixedLengthRecordResolver implements IgfsRecordResolver, Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Record length. */
-    private long recLen;
-
-    /**
-     * Empty constructor required for {@link Externalizable} support.
-     */
-    public IgfsFixedLengthRecordResolver() {
-        // No-op.
-    }
-
-    /**
-     * Creates fixed-length record resolver.
-     *
-     * @param recLen Record length.
-     */
-    public IgfsFixedLengthRecordResolver(long recLen) {
-        this.recLen = recLen;
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFileRange resolveRecords(IgniteFileSystem fs, IgfsInputStream stream,
-        IgfsFileRange suggestedRecord)
-        throws IgniteException, IOException {
-        long suggestedEnd = suggestedRecord.start() + suggestedRecord.length();
-
-        long startRem = suggestedRecord.start() % recLen;
-        long endRem = suggestedEnd % recLen;
-
-        long start = Math.min(suggestedRecord.start() + (startRem != 0 ? (recLen - startRem) : 0),
-            stream.length());
-        long end = Math.min(suggestedEnd + (endRem != 0 ? (recLen - endRem) : 0), stream.length());
-
-        assert end >= start;
-
-        return start != end ? new IgfsFileRange(suggestedRecord.path(), start, end - start) : null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsFixedLengthRecordResolver.class, this);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeLong(recLen);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        recLen = in.readLong();
-    }
-}
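
The arithmetic in resolveRecords() above rounds both ends of the suggested range up to the next multiple of recLen, capped by the stream length, and returns null when the rounded range collapses. A self-contained sketch of that calculation with assumed example values (recLen = 100, suggested range [130, 370), stream length 1000):

public class FixedLengthAlignmentSketch {
    public static void main(String[] args) {
        long recLen = 100, streamLen = 1_000;
        long suggestedStart = 130, suggestedEnd = 370;

        long startRem = suggestedStart % recLen; // 30
        long endRem = suggestedEnd % recLen;     // 70

        // Shift both ends up to the next record boundary, but never past the end of the stream.
        long start = Math.min(suggestedStart + (startRem != 0 ? recLen - startRem : 0), streamLen);
        long end = Math.min(suggestedEnd + (endRem != 0 ? recLen - endRem : 0), streamLen);

        System.out.println("[" + start + ", " + end + ")"); // Prints [200, 400): two whole 100-byte records.
    }
}
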
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsNewLineRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsNewLineRecordResolver.java
deleted file mode 100644
index 10b099e..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsNewLineRecordResolver.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce.records;
-
-import java.io.Externalizable;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Record resolver based on new line detection. This resolver can detect new lines based on '\n' or '\r\n' sequences.
- * <p>
- * Note that this resolver cannot be instantiated directly and has a single constant instance: {@link #NEW_LINE}.
- */
-public class IgfsNewLineRecordResolver extends IgfsByteDelimiterRecordResolver {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Singleton new line resolver. This resolver will resolve records based on new lines
-     * regardless if they have '\n' or '\r\n' patterns.
-     */
-    public static final IgfsNewLineRecordResolver NEW_LINE = new IgfsNewLineRecordResolver(true);
-
-    /** CR symbol. */
-    public static final byte SYM_CR = 0x0D;
-
-    /** LF symbol. */
-    public static final byte SYM_LF = 0x0A;
-
-    /**
-     * Empty constructor required for {@link Externalizable} support.
-     */
-    public IgfsNewLineRecordResolver() {
-        // No-op.
-    }
-
-    /**
-     * Creates new-line record resolver.
-     *
-     * @param b Artificial flag to differentiate from empty constructor.
-     */
-    private IgfsNewLineRecordResolver(boolean b) {
-        super(new byte[] { SYM_CR, SYM_LF }, new byte[] { SYM_LF });
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsNewLineRecordResolver.class, this);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsStringDelimiterRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsStringDelimiterRecordResolver.java
deleted file mode 100644
index 6dd029c..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsStringDelimiterRecordResolver.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.mapreduce.records;
-
-import java.io.Externalizable;
-import java.nio.charset.Charset;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Record resolver based on delimiters represented as strings. Works in the same way as
- * {@link IgfsByteDelimiterRecordResolver}, but uses strings as delimiters instead of byte arrays.
- */
-public class IgfsStringDelimiterRecordResolver extends IgfsByteDelimiterRecordResolver {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Converts string delimiters to byte delimiters.
-     *
-     * @param charset Charset.
-     * @param delims String delimiters.
-     * @return Byte delimiters.
-     */
-    @Nullable private static byte[][] toBytes(Charset charset, @Nullable String... delims) {
-        byte[][] res = null;
-
-        if (delims != null) {
-            res = new byte[delims.length][];
-
-            for (int i = 0; i < delims.length; i++)
-                res[i] = delims[i].getBytes(charset);
-        }
-
-        return res;
-    }
-
-    /**
-     * Empty constructor required for {@link Externalizable} support.
-     */
-    public IgfsStringDelimiterRecordResolver() {
-        // No-op.
-    }
-
-    /**
-     * Creates record resolver from given string and given charset.
-     *
-     * @param delims Delimiters.
-     * @param charset Charset.
-     */
-    public IgfsStringDelimiterRecordResolver(Charset charset, String... delims) {
-        super(toBytes(charset, delims));
-    }
-
-    /**
-     * Creates record resolver based on given string with default charset.
-     *
-     * @param delims Delimiters.
-     */
-    public IgfsStringDelimiterRecordResolver(String... delims) {
-        super(toBytes(Charset.defaultCharset(), delims));
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsStringDelimiterRecordResolver.class, this);
-    }
-}
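
toBytes() above simply encodes every string delimiter with the supplied charset, so the string-based resolver is a thin convenience over the byte-based one. An illustrative equivalence, with arbitrary example delimiters:

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

import org.apache.ignite.igfs.mapreduce.IgfsRecordResolver;
import org.apache.ignite.igfs.mapreduce.records.IgfsByteDelimiterRecordResolver;
import org.apache.ignite.igfs.mapreduce.records.IgfsStringDelimiterRecordResolver;

public class DelimiterEquivalenceSketch {
    public static void main(String[] args) {
        Charset cs = StandardCharsets.UTF_8;

        // String-based: delimiters are encoded with the given charset internally.
        IgfsRecordResolver byString = new IgfsStringDelimiterRecordResolver(cs, ";", "||");

        // Byte-based: the same delimiters encoded up front.
        IgfsRecordResolver byBytes = new IgfsByteDelimiterRecordResolver(";".getBytes(cs), "||".getBytes(cs));

        System.out.println(byString + " ~ " + byBytes);
    }
}
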
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/package-info.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/package-info.java
deleted file mode 100644
index 0394a88..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Contains record resolvers for In-Memory MapReduce over IGFS.
- */
-
-package org.apache.ignite.igfs.mapreduce.records;
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/package-info.java b/modules/core/src/main/java/org/apache/ignite/igfs/package-info.java
deleted file mode 100644
index b863408..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Contains <b>IG</b>nite <b>F</b>ile <b>S</b>ystem APIs.
- */
-
-package org.apache.ignite.igfs;
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
deleted file mode 100644
index d23552d..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.secondary;
-
-import java.io.OutputStream;
-import java.util.Collection;
-import java.util.Map;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathNotFoundException;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Secondary file system interface.
- */
-public interface IgfsSecondaryFileSystem {
-    /**
-     * Checks if the specified path exists.
-     *
-     * @param path Path to check for existence.
-     * @return {@code True} if such file exists, otherwise - {@code false}.
-     * @throws IgniteException In case of error.
-     */
-    public boolean exists(IgfsPath path);
-
-    /**
-     * Updates file information for the specified path. Existing properties not listed in the passed collection
-     * will not be affected. Other properties will be added or overwritten. Passed properties with {@code null} values
-     * will be removed from the stored properties or ignored if they don't exist in the file info.
-     * <p>
-     * When working in {@code DUAL_SYNC} or {@code DUAL_ASYNC} modes with a Hadoop secondary file system, only the
-     * following properties will be updated on the secondary file system:
-     * <ul>
-     * <li>{@code usrName} - file owner name;</li>
-     * <li>{@code grpName} - file owner group;</li>
-     * <li>{@code permission} - Unix-style string representing file permissions.</li>
-     * </ul>
-     *
-     * @param path File path to set properties for.
-     * @param props Properties to update.
-     * @return File information for specified path or {@code null} if such path does not exist.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteException;
-
-    /**
-     * Renames/moves a file.
-     * <p>
-     * You are free to rename/move data files as you wish, but directories can only be renamed.
-     * You cannot move a directory between different parent directories.
-     * <p>
-     * Examples:
-     * <ul>
-     *     <li>"/work/file.txt" => "/home/project/Presentation Scenario.txt"</li>
-     *     <li>"/work" => "/work-2012.bkp"</li>
-     *     <li>"/work" => "<strike>/backups/work</strike>" - such operation is restricted for directories.</li>
-     * </ul>
-     *
-     * @param src Source file path to rename.
-     * @param dest Destination file path. If destination path is a directory, then source file will be placed
-     *     into destination directory with original name.
-     * @throws IgniteException In case of error.
-     * @throws org.apache.ignite.igfs.IgfsPathNotFoundException If source file doesn't exist.
-     */
-    public void rename(IgfsPath src, IgfsPath dest) throws IgniteException;
-
-    /**
-     * Deletes file.
-     *
-     * @param path File path to delete.
-     * @param recursive Delete non-empty directories recursively.
-     * @return {@code True} in case of success, {@code false} otherwise.
-     * @throws IgniteException In case of error.
-     */
-    public boolean delete(IgfsPath path, boolean recursive) throws IgniteException;
-
-    /**
-     * Creates directories under specified path.
-     *
-     * @param path Path of directories chain to create.
-     * @throws IgniteException In case of error.
-     */
-    public void mkdirs(IgfsPath path) throws IgniteException;
-
-    /**
-     * Creates directories under specified path with the specified properties.
-     *
-     * @param path Path of directories chain to create.
-     * @param props Metadata properties to set on created directories.
-     * @throws IgniteException In case of error.
-     */
-    public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) throws IgniteException;
-
-    /**
-     * Lists file paths under the specified path.
-     *
-     * @param path Path to list files under.
-     * @return List of paths under the specified path.
-     * @throws IgniteException In case of error.
-     * @throws org.apache.ignite.igfs.IgfsPathNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteException;
-
-    /**
-     * Lists files under the specified path.
-     *
-     * @param path Path to list files under.
-     * @return List of files under the specified path.
-     * @throws IgniteException In case of error.
-     * @throws org.apache.ignite.igfs.IgfsPathNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteException;
-
-    /**
-     * Opens a file for reading.
-     *
-     * @param path File path to read.
-     * @param bufSize Read buffer size (bytes) or {@code zero} to use default value.
-     * @return File input stream to read data from.
-     * @throws IgniteException In case of error.
-     * @throws org.apache.ignite.igfs.IgfsPathNotFoundException If path doesn't exist.
-     */
-    public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) throws IgniteException;
-
-    /**
-     * Creates a file and opens it for writing.
-     *
-     * @param path File path to create.
-     * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existing directory.
-     * @return File output stream to write data to.
-     * @throws IgniteException In case of error.
-     */
-    public OutputStream create(IgfsPath path, boolean overwrite) throws IgniteException;
-
-    /**
-     * Creates a file and opens it for writing.
-     *
-     * @param path File path to create.
-     * @param bufSize Write buffer size (bytes) or {@code zero} to use default value.
-     * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existing directory.
-     * @param replication Replication factor.
-     * @param blockSize Block size.
-     * @param props File properties to set.
-     * @return File output stream to write data to.
-     * @throws IgniteException In case of error.
-     */
-    public OutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication, long blockSize,
-       @Nullable Map<String, String> props) throws IgniteException;
-
-    /**
-     * Opens an output stream to an existing file for appending data.
-     *
-     * @param path File path to append.
-     * @param bufSize Write buffer size (bytes) or {@code zero} to use default value.
-     * @param create Create file if it doesn't exist yet.
-     * @param props File properties to set, applied only if the file was just created.
-     * @return File output stream to append data to.
-     * @throws IgniteException In case of error.
-     * @throws org.apache.ignite.igfs.IgfsPathNotFoundException If path doesn't exist and create flag is {@code false}.
-     */
-    public OutputStream append(IgfsPath path, int bufSize, boolean create, @Nullable Map<String, String> props)
-        throws IgniteException;
-
-    /**
-     * Gets file information for the specified path.
-     *
-     * @param path Path to get information for.
-     * @return File information for specified path or {@code null} if such path does not exist.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsFile info(IgfsPath path) throws IgniteException;
-
-    /**
-     * Gets used space in bytes.
-     *
-     * @return Used space in bytes.
-     * @throws IgniteException In case of error.
-     */
-    public long usedSpaceSize() throws IgniteException;
-
-    /**
-     * Set times for the given path.
-     *
-     * @param path Path.
-     * @param modificationTime Modification time.
-     * @param accessTime Access time.
-     * @throws IgniteException If failed.
-     */
-    public void setTimes(IgfsPath path, long modificationTime, long accessTime) throws IgniteException;
-
-    /**
-     * Get affinity block locations for data blocks of the file. If the {@code maxLen} parameter is set and a
-     * particular block location length is greater than this value, block locations will be split into smaller
-     * chunks.
-     *
-     * @param path File path to get affinity for.
-     * @param start Position in the file to start affinity resolution from.
-     * @param len Size of data in the file to resolve affinity for.
-     * @param maxLen Maximum length of a single returned block location length.
-     * @return Affinity block locations.
-     * @throws IgniteException In case of error.
-     * @throws IgfsPathNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len, long maxLen)
-        throws IgniteException;
-}
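
The interface above covers plain metadata and data operations, so it can be exercised without a running Ignite node. A minimal usage sketch against the local implementation declared further down in this patch; the work directory and file paths are made-up examples:

import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.igfs.secondary.local.LocalIgfsSecondaryFileSystem;

public class SecondaryFsUsageSketch {
    public static void main(String[] args) throws Exception {
        LocalIgfsSecondaryFileSystem secondary = new LocalIgfsSecondaryFileSystem();

        secondary.setWorkDirectory("/tmp/igfs-secondary"); // All IGFS paths resolve under this directory.
        secondary.start();                                  // LifecycleAware: resolves the work directory.

        IgfsPath dir = new IgfsPath("/reports");
        IgfsPath file = new IgfsPath("/reports/2020.csv");

        if (!secondary.exists(dir))
            secondary.mkdirs(dir);

        try (OutputStream out = secondary.create(file, /*overwrite*/ true)) {
            out.write("id;value\n1;42\n".getBytes(StandardCharsets.UTF_8));
        }

        System.out.println("Used space: " + secondary.usedSpaceSize() + " bytes");

        secondary.stop();
    }
}
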
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
deleted file mode 100644
index ac71c4c..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.secondary;
-
-import java.io.Closeable;
-import java.io.IOException;
-
-/**
- * The simplest data input interface to read from a secondary file system.
- */
-public interface IgfsSecondaryFileSystemPositionedReadable extends Closeable {
-    /**
-     * Read up to the specified number of bytes, from a given position within a file, and return the number of bytes
-     * read.
-     *
-     * @param pos Position in the input stream to seek.
-     * @param buf Buffer into which data is read.
-     * @param off Offset in the buffer at which the read data should be written.
-     * @param len The number of bytes to read.
-     * @return Total number of bytes read into the buffer, or -1 if there is no more data (EOF).
-     * @throws IOException In case of any exception.
-     */
-    public int read(long pos, byte[] buf, int off, int len) throws IOException;
-}
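
A positioned readable only has to honor the read(pos, buf, off, len) contract above, including the -1 return on EOF. A minimal sketch backed by java.io.RandomAccessFile; the adapter class is hypothetical and not thread-safe:

import java.io.IOException;
import java.io.RandomAccessFile;

import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;

/** Sketch: positioned reads over a local file. */
public class RandomAccessPositionedReadable implements IgfsSecondaryFileSystemPositionedReadable {
    /** Underlying random-access file. */
    private final RandomAccessFile raf;

    public RandomAccessPositionedReadable(String path) throws IOException {
        raf = new RandomAccessFile(path, "r");
    }

    /** {@inheritDoc} */
    @Override public int read(long pos, byte[] buf, int off, int len) throws IOException {
        raf.seek(pos); // Position within the file before reading.

        return raf.read(buf, off, len); // Returns -1 on EOF, matching the interface contract.
    }

    /** {@inheritDoc} */
    @Override public void close() throws IOException {
        raf.close();
    }
}
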
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/local/LocalIgfsSecondaryFileSystem.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/local/LocalIgfsSecondaryFileSystem.java
deleted file mode 100644
index 26ad1f2..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/secondary/local/LocalIgfsSecondaryFileSystem.java
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs.secondary.local;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.LinkOption;
-import java.nio.file.Path;
-import java.nio.file.attribute.BasicFileAttributeView;
-import java.nio.file.attribute.BasicFileAttributes;
-import java.nio.file.attribute.FileTime;
-import java.nio.file.attribute.PosixFileAttributes;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsException;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathAlreadyExistsException;
-import org.apache.ignite.igfs.IgfsPathIsNotDirectoryException;
-import org.apache.ignite.igfs.IgfsPathNotFoundException;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;
-import org.apache.ignite.internal.processors.igfs.IgfsBlockLocationImpl;
-import org.apache.ignite.internal.processors.igfs.IgfsDataManager;
-import org.apache.ignite.internal.processors.igfs.IgfsImpl;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.processors.igfs.secondary.local.LocalFileSystemBlockKey;
-import org.apache.ignite.internal.processors.igfs.secondary.local.LocalFileSystemIgfsFile;
-import org.apache.ignite.internal.processors.igfs.secondary.local.LocalFileSystemPositionedReadable;
-import org.apache.ignite.internal.processors.igfs.secondary.local.LocalFileSystemSizeVisitor;
-import org.apache.ignite.internal.processors.igfs.secondary.local.LocalFileSystemUtils;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lifecycle.LifecycleAware;
-import org.apache.ignite.resources.FileSystemResource;
-import org.apache.ignite.resources.LoggerResource;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Secondary file system which delegates to local file system.
- */
-public class LocalIgfsSecondaryFileSystem implements IgfsSecondaryFileSystem, LifecycleAware {
-    /** Path that will be added to each passed path. */
-    private String workDir;
-
-    /** Logger. */
-    @SuppressWarnings("unused")
-    @LoggerResource
-    private IgniteLogger log;
-
-    /** IGFS instance. */
-    @SuppressWarnings("unused")
-    @FileSystemResource
-    private IgfsImpl igfs;
-
-    /**
-     * Converts an I/O exception from the underlying local file system into an appropriate IGFS exception.
-     *
-     * @param e Exception to check.
-     * @param msg Detailed error message.
-     * @return Appropriate exception.
-     */
-    private IgfsException handleSecondaryFsError(IOException e, String msg) {
-        if (e instanceof FileNotFoundException)
-            return new IgfsPathNotFoundException(e);
-        else
-            return new IgfsException(msg, e);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean exists(IgfsPath path) {
-        return fileForPath(path).exists();
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public IgfsFile update(IgfsPath path, Map<String, String> props) {
-        File f = fileForPath(path);
-
-        if (!f.exists())
-            return null;
-
-        updatePropertiesIfNeeded(path, props);
-
-        return info(path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void rename(IgfsPath src, IgfsPath dest) {
-        File srcFile = fileForPath(src);
-        File destFile = fileForPath(dest);
-
-        if (!srcFile.exists())
-            throw new IgfsPathNotFoundException("Failed to perform rename because source path not found: " + src);
-
-        if (srcFile.isDirectory() && destFile.isFile())
-            throw new IgfsPathIsNotDirectoryException("Failed to perform rename because destination path is " +
-                "directory and source path is file [src=" + src + ", dest=" + dest + ']');
-
-        try {
-            if (destFile.isDirectory())
-                Files.move(srcFile.toPath(), destFile.toPath().resolve(srcFile.getName()));
-            else if (!srcFile.renameTo(destFile))
-                throw new IgfsException("Failed to perform rename (underlying file system returned false) " +
-                    "[src=" + src + ", dest=" + dest + ']');
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to rename [src=" + src + ", dest=" + dest + ']');
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean delete(IgfsPath path, boolean recursive) {
-        File f = fileForPath(path);
-
-        if (!recursive)
-            return f.delete();
-        else
-            return deleteRecursive(f, false);
-    }
-
-    /**
-     * Delete directory recursively.
-     *
-     * @param f Directory.
-     * @param deleteIfExists Ignore delete errors if the file doesn't exist.
-     * @return {@code true} if successful.
-     */
-    private boolean deleteRecursive(File f, boolean deleteIfExists) {
-        BasicFileAttributes attrs;
-
-        try {
-            attrs = Files.readAttributes(f.toPath(), BasicFileAttributes.class, LinkOption.NOFOLLOW_LINKS);
-        }
-        catch (IOException ignore) {
-            return deleteIfExists && !f.exists();
-        }
-
-        if (!attrs.isDirectory() || attrs.isSymbolicLink())
-            return f.delete() || (deleteIfExists && !f.exists());
-
-        File[] entries = f.listFiles();
-
-        if (entries != null) {
-            for (File entry : entries) {
-                boolean res = deleteRecursive(entry, true);
-
-                if (!res)
-                    return false;
-            }
-        }
-
-        return f.delete() || (deleteIfExists && !f.exists());
-    }
-
-    /** {@inheritDoc} */
-    @Override public void mkdirs(IgfsPath path) {
-        if (!mkdirs0(fileForPath(path)))
-            throw new IgniteException("Failed to make directories (underlying file system returned false): " + path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) {
-        mkdirs(path);
-
-        updatePropertiesIfNeeded(path, props);
-    }
-
-    /**
-     * Create directories.
-     *
-     * @param dir Directory.
-     * @return Result.
-     */
-    private boolean mkdirs0(@Nullable File dir) {
-        if (dir == null)
-            return true; // Nothing to create.
-
-        if (dir.exists())
-            // Already exists, so no-op.
-            return dir.isDirectory();
-        else {
-            File parentDir = dir.getParentFile();
-
-            if (!mkdirs0(parentDir)) // Create parent first.
-                return false;
-
-            boolean res = dir.mkdir();
-
-            if (!res)
-                res = dir.exists(); // Tolerate concurrent creation.
-
-            return res;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsPath> listPaths(IgfsPath path) {
-        File[] entries = listFiles0(path);
-
-        if (F.isEmpty(entries))
-            return Collections.emptySet();
-        else {
-            Collection<IgfsPath> res = U.newHashSet(entries.length);
-
-            for (File entry : entries)
-                res.add(igfsPath(entry));
-
-            return res;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsFile> listFiles(IgfsPath path) {
-        File[] entries = listFiles0(path);
-
-        if (F.isEmpty(entries))
-            return Collections.emptySet();
-        else {
-            Collection<IgfsFile> res = U.newHashSet(entries.length);
-
-            for (File entry : entries) {
-                IgfsFile info = info(igfsPath(entry));
-
-                if (info != null)
-                    res.add(info);
-            }
-
-            return res;
-        }
-    }
-
-    /**
-     * Returns an array of File objects located under the specified path.
-     *
-     * @param path IGFS path.
-     * @return Array of File objects.
-     */
-    @Nullable private File[] listFiles0(IgfsPath path) {
-        File f = fileForPath(path);
-
-        if (!f.exists())
-            throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
-        else
-            return f.listFiles();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) {
-        try {
-            FileInputStream in = new FileInputStream(fileForPath(path));
-
-            return new LocalFileSystemPositionedReadable(in, bufSize);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to open file for read: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputStream create(IgfsPath path, boolean overwrite) {
-        return create0(path, overwrite);
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication,
-        long blockSize, @Nullable Map<String, String> props) {
-        OutputStream os = create0(path, overwrite);
-
-        try {
-            updatePropertiesIfNeeded(path, props);
-
-            return os;
-        }
-        catch (Exception err) {
-            try {
-                os.close();
-            }
-            catch (IOException closeErr) {
-                err.addSuppressed(closeErr);
-            }
-
-            throw err;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputStream append(IgfsPath path, int bufSize, boolean create,
-        @Nullable Map<String, String> props) {
-        try {
-            File file = fileForPath(path);
-
-            boolean exists = file.exists();
-
-            if (exists) {
-                OutputStream os = new FileOutputStream(file, true);
-
-                try {
-                    updatePropertiesIfNeeded(path, props);
-
-                    return os;
-                }
-                catch (Exception err) {
-                    try {
-                        os.close();
-
-                        throw err;
-                    }
-                    catch (IOException closeErr) {
-                        err.addSuppressed(closeErr);
-
-                        throw err;
-                    }
-                }
-            }
-            else {
-                if (create)
-                    return create(path, bufSize, false, 0, 0, props);
-                else
-                    throw new IgfsPathNotFoundException("Failed to append to file because it doesn't exist: " + path);
-            }
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to append to file: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile info(final IgfsPath path) {
-        File file = fileForPath(path);
-
-        if (!file.exists())
-            return null;
-
-        boolean isDir = file.isDirectory();
-
-        PosixFileAttributes attrs = LocalFileSystemUtils.posixAttributes(file);
-
-        Map<String, String> props = LocalFileSystemUtils.posixAttributesToMap(attrs);
-
-        BasicFileAttributes basicAttrs = LocalFileSystemUtils.basicAttributes(file);
-
-        if (isDir) {
-            return new LocalFileSystemIgfsFile(path, false, true, 0,
-                basicAttrs.lastAccessTime().toMillis(), basicAttrs.lastModifiedTime().toMillis(), 0, props);
-        }
-        else {
-            return new LocalFileSystemIgfsFile(path, file.isFile(), false, 0,
-                basicAttrs.lastAccessTime().toMillis(), basicAttrs.lastModifiedTime().toMillis(), file.length(), props);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public long usedSpaceSize() {
-        Path p = fileForPath(IgfsPath.ROOT).toPath();
-
-        try {
-            LocalFileSystemSizeVisitor visitor = new LocalFileSystemSizeVisitor();
-
-            Files.walkFileTree(p, visitor);
-
-            return visitor.size();
-        }
-        catch (IOException e) {
-            throw new IgfsException("Failed to calculate used space size.", e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setTimes(IgfsPath path, long modificationTime, long accessTime) throws IgniteException {
-        Path p = fileForPath(path).toPath();
-
-        if (!Files.exists(p))
-            throw new IgfsPathNotFoundException("Failed to set times (path not found): " + path);
-
-        try {
-            Files.getFileAttributeView(p, BasicFileAttributeView.class)
-                .setTimes(
-                    (modificationTime >= 0) ? FileTime.from(modificationTime, TimeUnit.MILLISECONDS) : null,
-                    (accessTime >= 0) ? FileTime.from(accessTime, TimeUnit.MILLISECONDS) : null,
-                    null);
-        }
-        catch (IOException e) {
-            throw new IgniteException("Failed to set times for path: " + path, e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void start() throws IgniteException {
-        if (workDir != null)
-            workDir = new File(workDir).getAbsolutePath();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void stop() throws IgniteException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len,
-        long maxLen) throws IgniteException {
-        File f = fileForPath(path);
-
-        if (!f.exists())
-            throw new IgfsPathNotFoundException("File not found: " + path);
-
-        // Create fake block & fake affinity for blocks
-        long blockSize = igfs.configuration().getBlockSize();
-
-        if (maxLen <= 0)
-            maxLen = Long.MAX_VALUE;
-
-        assert maxLen > 0 : "maxLen : " + maxLen;
-
-        long end = start + len;
-
-        Collection<IgfsBlockLocation> blocks = new ArrayList<>((int)(len / maxLen));
-
-        IgfsDataManager data = igfs.context().data();
-
-        Collection<ClusterNode> lastNodes = null;
-
-        long lastBlockIdx = -1;
-
-        IgfsBlockLocationImpl lastBlock = null;
-
-        for (long offset = start; offset < end; ) {
-            long blockIdx = offset / blockSize;
-
-            // Each step is min of maxLen and end of block.
-            long lenStep = Math.min(
-                maxLen - (lastBlock != null ? lastBlock.length() : 0),
-                (blockIdx + 1) * blockSize - offset);
-
-            lenStep = Math.min(lenStep, end - offset);
-
-            // Create fake affinity key to map blocks of secondary filesystem to nodes.
-            LocalFileSystemBlockKey affKey = new LocalFileSystemBlockKey(path, blockIdx);
-
-            if (blockIdx != lastBlockIdx) {
-                Collection<ClusterNode> nodes = data.affinityNodes(affKey);
-
-                if (!nodes.equals(lastNodes) && lastNodes != null && lastBlock != null) {
-                    blocks.add(lastBlock);
-
-                    lastBlock = null;
-                }
-
-                lastNodes = nodes;
-
-                lastBlockIdx = blockIdx;
-            }
-
-            if (lastBlock == null)
-                lastBlock = new IgfsBlockLocationImpl(offset, lenStep, lastNodes);
-            else
-                lastBlock.increaseLength(lenStep);
-
-            if (lastBlock.length() == maxLen || lastBlock.start() + lastBlock.length() == end) {
-                blocks.add(lastBlock);
-
-                lastBlock = null;
-            }
-
-            offset += lenStep;
-        }
-
-        return blocks;
-    }
-
-    /**
-     * Get work directory.
-     *
-     * @return Work directory.
-     */
-    @Nullable public String getWorkDirectory() {
-        return workDir;
-    }
-
-    /**
-     * Set work directory.
-     *
-     * @param workDir Work directory.
-     */
-    public void setWorkDirectory(@Nullable String workDir) {
-        this.workDir = workDir;
-    }
-
-    /**
-     * Create file for IGFS path.
-     *
-     * @param path IGFS path.
-     * @return File object.
-     */
-    private File fileForPath(IgfsPath path) {
-        if (workDir == null)
-            return new File(path.toString());
-        else {
-            if ("/".equals(path.toString()))
-                return new File(workDir);
-            else
-                return new File(workDir, path.toString());
-        }
-    }
-
-    /**
-     * Create IGFS path for file.
-     *
-     * @param f File object.
-     * @return IGFS path.
-     * @throws IgfsException If failed.
-     */
-    private IgfsPath igfsPath(File f) throws IgfsException {
-        String path = f.getAbsolutePath();
-
-        if (workDir != null) {
-            if (!path.startsWith(workDir))
-                throw new IgfsException("Path is not located in the work directory [workDir=" + workDir +
-                    ", path=" + path + ']');
-
-            path = path.substring(workDir.length(), path.length());
-        }
-
-        return new IgfsPath(path);
-    }
-
-    /**
-     * Internal create routine.
-     *
-     * @param path Path.
-     * @param overwrite Overwrite flag.
-     * @return Output stream.
-     */
-    private OutputStream create0(IgfsPath path, boolean overwrite) {
-        File file = fileForPath(path);
-
-        boolean exists = file.exists();
-
-        if (exists) {
-            if (!overwrite)
-                throw new IgfsPathAlreadyExistsException("Failed to create a file because it already exists: " + path);
-        }
-        else {
-            File parent = file.getParentFile();
-
-            if (!mkdirs0(parent))
-                throw new IgfsException("Failed to create parent directory for file (underlying file system " +
-                    "returned false): " + path);
-        }
-
-        try {
-            return new FileOutputStream(file);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to create file [path=" + path + ", overwrite=" + overwrite + ']');
-        }
-    }
-
-    /**
-     * Update path properties if needed.
-     *
-     * @param path IGFS path
-     * @param props Properties map.
-     */
-    private void updatePropertiesIfNeeded(IgfsPath path, Map<String, String> props) {
-        if (props == null || props.isEmpty())
-            return;
-
-        File file = fileForPath(path);
-
... 160918 lines suppressed ...