Posted to commits@ignite.apache.org by vo...@apache.org on 2015/03/04 16:35:46 UTC

[44/45] incubator-ignite git commit: IGNITE-386: Squashed changes.

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
new file mode 100644
index 0000000..f679fc0
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
@@ -0,0 +1,805 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.configuration;
+
+import org.apache.ignite.igfs.*;
+import org.apache.ignite.igfs.secondary.*;
+import org.apache.ignite.internal.util.typedef.internal.*;
+import org.jetbrains.annotations.*;
+
+import java.util.*;
+import java.util.concurrent.*;
+
+/**
+ * {@code IGFS} configuration. More than one file system can be configured within a grid.
+ * {@code IGFS} configuration is provided via the {@link IgniteConfiguration#getFileSystemConfiguration()}
+ * method.
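+ * <p>
+ * A minimal configuration sketch is shown below (it assumes the matching
+ * {@code IgniteConfiguration#setFileSystemConfiguration(...)} mutator for the getter referenced above):
+ * <pre>
+ * FileSystemConfiguration fsCfg = new FileSystemConfiguration();
+ *
+ * fsCfg.setName("igfs");
+ * fsCfg.setMetaCacheName("igfs-meta");
+ * fsCfg.setDataCacheName("igfs-data");
+ * fsCfg.setDefaultMode(IgfsMode.PRIMARY);
+ *
+ * IgniteConfiguration cfg = new IgniteConfiguration();
+ *
+ * cfg.setFileSystemConfiguration(fsCfg);
+ * </pre>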
+ */
+public class FileSystemConfiguration {
+    /** Default file system user name. */
+    public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
+
+    /** Default IPC port. */
+    public static final int DFLT_IPC_PORT = 10500;
+
+    /** Default fragmentizer throttling block length. */
+    public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024;
+
+    /** Default fragmentizer throttling delay. */
+    public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
+
+    /** Default fragmentizer concurrent files. */
+    public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
+
+    /** Default fragmentizer local writes ratio. */
+    public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
+
+    /** Fragmentizer enabled property. */
+    public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
+
+    /** Default batch size for logging. */
+    public static final int DFLT_IGFS_LOG_BATCH_SIZE = 100;
+
+    /** Default {@code IGFS} log directory. */
+    public static final String DFLT_IGFS_LOG_DIR = "work/igfs/log";
+
+    /** Default per node buffer size. */
+    public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
+
+    /** Default number of per node parallel operations. */
+    public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
+
+    /** Default IGFS mode. */
+    public static final IgfsMode DFLT_MODE = IgfsMode.DUAL_ASYNC;
+
+    /** Default file's data block size (bytes). */
+    public static final int DFLT_BLOCK_SIZE = 1 << 16;
+
+    /** Default read/write buffers size (bytes). */
+    public static final int DFLT_BUF_SIZE = 1 << 16;
+
+    /** Default timeout to await trash directory purge when data cache oversize is detected. */
+    public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
+
+    /** Default management port. */
+    public static final int DFLT_MGMT_PORT = 11400;
+
+    /** Default IPC endpoint enabled flag. */
+    public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
+
+    /** IGFS instance name. */
+    private String name;
+
+    /** Cache name to store IGFS meta information. */
+    private String metaCacheName;
+
+    /** Cache name to store file's data blocks. */
+    private String dataCacheName;
+
+    /** File's data block size (bytes). */
+    private int blockSize = DFLT_BLOCK_SIZE;
+
+    /** Number of blocks prefetched when a specific file's chunk is requested. */
+    private int prefetchBlocks;
+
+    /** Amount of sequential block reads before prefetch is triggered. */
+    private int seqReadsBeforePrefetch;
+
+    /** Read/write buffers size for stream operations (bytes). */
+    private int bufSize = DFLT_BUF_SIZE;
+
+    /** Per node buffer size. */
+    private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
+
+    /** Per node parallel operations. */
+    private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
+
+    /** IPC endpoint properties to publish IGFS over. */
+    private Map<String, String> ipcEndpointCfg;
+
+    /** IPC endpoint enabled flag. */
+    private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
+
+    /** Management port. */
+    private int mgmtPort = DFLT_MGMT_PORT;
+
+    /** Secondary file system. */
+    private IgfsSecondaryFileSystem secondaryFs;
+
+    /** IGFS mode. */
+    private IgfsMode dfltMode = DFLT_MODE;
+
+    /** Fragmentizer throttling block length. */
+    private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
+
+    /** Fragmentizer throttling delay. */
+    private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
+
+    /** Fragmentizer concurrent files. */
+    private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
+
+    /** Fragmentizer local writes ratio. */
+    private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
+
+    /** Fragmentizer enabled flag. */
+    private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
+
+    /** Path modes. */
+    private Map<String, IgfsMode> pathModes;
+
+    /** Maximum space. */
+    private long maxSpace;
+
+    /** Trash purge await timeout. */
+    private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
+
+    /** Dual mode PUT operations executor service. */
+    private ExecutorService dualModePutExec;
+
+    /** Dual mode PUT operations executor service shutdown flag. */
+    private boolean dualModePutExecShutdown;
+
+    /** Maximum amount of data in pending puts. */
+    private long dualModeMaxPendingPutsSize;
+
+    /** Maximum range length. */
+    private long maxTaskRangeLen;
+
+    /**
+     * Constructs default configuration.
+     */
+    public FileSystemConfiguration() {
+        // No-op.
+    }
+
+    /**
+     * Constructs the copy of the configuration.
+     *
+     * @param cfg Configuration to copy.
+     */
+    public FileSystemConfiguration(FileSystemConfiguration cfg) {
+        assert cfg != null;
+
+        /*
+         * Must preserve alphabetical order!
+         */
+        blockSize = cfg.getBlockSize();
+        bufSize = cfg.getStreamBufferSize();
+        dataCacheName = cfg.getDataCacheName();
+        dfltMode = cfg.getDefaultMode();
+        dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
+        dualModePutExec = cfg.getDualModePutExecutorService();
+        dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
+        fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
+        fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
+        fragmentizerEnabled = cfg.isFragmentizerEnabled();
+        fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
+        fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
+        secondaryFs = cfg.getSecondaryFileSystem();
+        ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
+        ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
+        maxSpace = cfg.getMaxSpaceSize();
+        maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
+        metaCacheName = cfg.getMetaCacheName();
+        mgmtPort = cfg.getManagementPort();
+        name = cfg.getName();
+        pathModes = cfg.getPathModes();
+        perNodeBatchSize = cfg.getPerNodeBatchSize();
+        perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
+        prefetchBlocks = cfg.getPrefetchBlocks();
+        seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
+        trashPurgeTimeout = cfg.getTrashPurgeTimeout();
+    }
+
+    /**
+     * Gets IGFS instance name. If {@code null}, then instance with default
+     * name will be used.
+     *
+     * @return IGFS instance name.
+     */
+    @Nullable public String getName() {
+        return name;
+    }
+
+    /**
+     * Sets IGFS instance name.
+     *
+     * @param name IGFS instance name.
+     */
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Cache name to store IGFS meta information. If {@code null}, then instance
+     * with default meta-cache name will be used.
+     *
+     * @return Cache name to store IGFS meta information.
+     */
+    @Nullable public String getMetaCacheName() {
+        return metaCacheName;
+    }
+
+    /**
+     * Sets cache name to store IGFS meta information.
+     *
+     * @param metaCacheName Cache name to store IGFS meta information.
+     */
+    public void setMetaCacheName(String metaCacheName) {
+        this.metaCacheName = metaCacheName;
+    }
+
+    /**
+     * Cache name to store IGFS data.
+     *
+     * @return Cache name to store IGFS data.
+     */
+    @Nullable public String getDataCacheName() {
+        return dataCacheName;
+    }
+
+    /**
+     * Sets cache name to store IGFS data.
+     *
+     * @param dataCacheName Cache name to store IGFS data.
+     */
+    public void setDataCacheName(String dataCacheName) {
+        this.dataCacheName = dataCacheName;
+    }
+
+    /**
+     * Gets file's data block size.
+     *
+     * @return File's data block size.
+     */
+    public int getBlockSize() {
+        return blockSize;
+    }
+
+    /**
+     * Sets file's data block size.
+     *
+     * @param blockSize File's data block size (bytes) or {@code 0} to reset to the default value.
+     */
+    public void setBlockSize(int blockSize) {
+        A.ensure(blockSize >= 0, "blockSize >= 0");
+
+        this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
+    }
+
+    /**
+     * Gets the number of blocks prefetched when a specific file's chunk is requested.
+     *
+     * @return The number of pre-fetched blocks.
+     */
+    public int getPrefetchBlocks() {
+        return prefetchBlocks;
+    }
+
+    /**
+     * Sets the number of blocks to prefetch when a specific file's chunk is requested.
+     *
+     * @param prefetchBlocks New number of pre-fetched blocks.
+     */
+    public void setPrefetchBlocks(int prefetchBlocks) {
+        A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
+
+        this.prefetchBlocks = prefetchBlocks;
+    }
+
+    /**
+     * Gets amount of sequential block reads before prefetch is triggered. The
+     * higher this value, the longer IGFS will wait before starting to prefetch
+     * values ahead of time. Depending on the use case, this can either help
+     * or hurt performance.
+     * <p>
+     * Default is {@code 0} which means that pre-fetching will start right away.
+     * <h1 class="header">Integration With Hadoop</h1>
+     * This parameter can also be overridden for individual Hadoop MapReduce tasks by passing the
+     * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to the Hadoop
+     * MapReduce task.
+     * <p>
+     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
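+     * <p>
+     * For example, the property can be set on the Hadoop side roughly as follows (a sketch; {@code taskCfg} is an
+     * {@code org.apache.hadoop.conf.Configuration} instance, and {@code igfs} is an illustrative IGFS instance name
+     * substituted for the {@code [name]} placeholder):
+     * <pre>
+     * Configuration taskCfg = new Configuration();
+     *
+     * taskCfg.setInt("fs.igfs.igfs.open.sequential_reads_before_prefetch", 8);
+     * </pre>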
+     *
+     * @return Amount of sequential block reads.
+     */
+    public int getSequentialReadsBeforePrefetch() {
+        return seqReadsBeforePrefetch;
+    }
+
+    /**
+     * Sets amount of sequential block reads before prefetch is triggered. The
+     * higher this value, the longer IGFS will wait before starting to prefetch
+     * values ahead of time. Depending on the use case, this can either help
+     * or hurt performance.
+     * <p>
+     * Default is {@code 0} which means that pre-fetching will start right away.
+     * <h1 class="header">Integration With Hadoop</h1>
+     * This parameter can also be overridden for individual Hadoop MapReduce tasks by passing the
+     * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to the Hadoop
+     * MapReduce task.
+     * <p>
+     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
+     *
+     * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered.
+     */
+    public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
+        A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
+
+        this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
+    }
+
+    /**
+     * Gets read/write buffer size for {@code IGFS} stream operations in bytes.
+     *
+     * @return Read/write buffers size (bytes).
+     */
+    public int getStreamBufferSize() {
+        return bufSize;
+    }
+
+    /**
+     * Sets read/write buffers size for {@code IGFS} stream operations (bytes).
+     *
+     * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value.
+     */
+    public void setStreamBufferSize(int bufSize) {
+        A.ensure(bufSize >= 0, "bufSize >= 0");
+
+        this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
+    }
+
+    /**
+     * Gets number of file blocks buffered on local node before sending batch to remote node.
+     *
+     * @return Per node buffer size.
+     */
+    public int getPerNodeBatchSize() {
+        return perNodeBatchSize;
+    }
+
+    /**
+     * Sets number of file blocks collected on local node before sending batch to remote node.
+     *
+     * @param perNodeBatchSize Per node buffer size.
+     */
+    public void setPerNodeBatchSize(int perNodeBatchSize) {
+        this.perNodeBatchSize = perNodeBatchSize;
+    }
+
+    /**
+     * Gets number of batches that can be concurrently sent to remote node.
+     *
+     * @return Number of batches for each node.
+     */
+    public int getPerNodeParallelBatchCount() {
+        return perNodeParallelBatchCnt;
+    }
+
+    /**
+     * Sets number of file block batches that can be concurrently sent to remote node.
+     *
+     * @param perNodeParallelBatchCnt Per node parallel load operations.
+     */
+    public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
+        this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
+    }
+
+    /**
+     * Gets map of IPC endpoint configuration properties. Two different
+     * endpoint types are supported: {@code shared-memory} and {@code TCP}.
+     * <p>
+     * The following configuration properties are supported for {@code shared-memory}
+     * endpoint:
+     * <ul>
+     *     <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li>
+     *     <li>{@code port} - endpoint port.</li>
+     *     <li>{@code size} - memory size allocated for single endpoint communication.</li>
+     *     <li>
+     *         {@code tokenDirectoryPath} - path, either absolute or relative to {@code IGNITE_HOME} to
+     *         store shared memory tokens.
+     *     </li>
+     * </ul>
+     * <p>
+     * The following configuration properties are supported for {@code TCP} approach:
+     * <ul>
+     *     <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li>
+     *     <li>{@code port} - endpoint bind port.</li>
+     *     <li>
+     *         {@code host} - endpoint bind host. If omitted, '127.0.0.1' will be used.
+     *     </li>
+     * </ul>
+     * <p>
+     * Note that the {@code shared-memory} approach is not supported on Windows environments.
+     * If IGFS fails to bind to a particular port, further bind attempts will be performed every 3 seconds.
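+     * <p>
+     * For example, a {@code TCP} endpoint sketch using the property names listed above ({@code fsCfg} stands for
+     * a {@code FileSystemConfiguration} instance):
+     * <pre>
+     * Map&lt;String, String&gt; endpointCfg = new HashMap&lt;&gt;();
+     *
+     * endpointCfg.put("type", "tcp");
+     * endpointCfg.put("host", "127.0.0.1");
+     * endpointCfg.put("port", "10500");
+     *
+     * fsCfg.setIpcEndpointConfiguration(endpointCfg);
+     * </pre>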
+     *
+     * @return Map of IPC endpoint configuration properties. If the value is not set, defaults will be used. The
+     * default type on Windows is "tcp"; on all other platforms it is "shmem". Default port is {@link #DFLT_IPC_PORT}.
+     */
+    @Nullable public Map<String,String> getIpcEndpointConfiguration() {
+        return ipcEndpointCfg;
+    }
+
+    /**
+     * Sets IPC endpoint configuration to publish IGFS over.
+     *
+     * @param ipcEndpointCfg Map of IPC endpoint config properties.
+     */
+    public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) {
+        this.ipcEndpointCfg = ipcEndpointCfg;
+    }
+
+    /**
+     * Gets IPC endpoint enabled flag. If set to {@code true}, the endpoint will be created and bound to a specific
+     * port. Otherwise, the endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
+     *
+     * @return {@code True} in case endpoint is enabled.
+     */
+    public boolean isIpcEndpointEnabled() {
+        return ipcEndpointEnabled;
+    }
+
+    /**
+     * Sets IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
+     *
+     * @param ipcEndpointEnabled IPC endpoint enabled flag.
+     */
+    public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
+        this.ipcEndpointEnabled = ipcEndpointEnabled;
+    }
+
+    /**
+     * Gets port number for management endpoint. All IGFS nodes should have this port open
+     * for Visor Management Console to work with IGFS.
+     * <p>
+     * Default value is {@link #DFLT_MGMT_PORT}.
+     *
+     * @return Port number or {@code -1} if management endpoint should be disabled.
+     */
+    public int getManagementPort() {
+        return mgmtPort;
+    }
+
+    /**
+     * Sets management endpoint port.
+     *
+     * @param mgmtPort Port number or {@code -1} to disable management endpoint.
+     */
+    public void setManagementPort(int mgmtPort) {
+        this.mgmtPort = mgmtPort;
+    }
+
+    /**
+     * Gets mode to specify how {@code IGFS} interacts with a secondary Hadoop file system, such as {@code HDFS}.
+     * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
+     * purposes.
+     * <p>
+     * Default mode is {@link org.apache.ignite.igfs.IgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
+     * not configured, this mode will work just like {@link org.apache.ignite.igfs.IgfsMode#PRIMARY} mode.
+     *
+     * @return Mode to specify how IGFS interacts with secondary HDFS file system.
+     */
+    public IgfsMode getDefaultMode() {
+        return dfltMode;
+    }
+
+    /**
+     * Sets {@code IGFS} mode to specify how it should interact with secondary
+     * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
+     * for pass-through, write-through, and read-through purposes.
+     *
+     * @param dfltMode {@code IGFS} mode.
+     */
+    public void setDefaultMode(IgfsMode dfltMode) {
+        this.dfltMode = dfltMode;
+    }
+
+    /**
+     * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
+     * and read-through purposes.
+     *
+     * @return Secondary file system.
+     */
+    public IgfsSecondaryFileSystem getSecondaryFileSystem() {
+        return secondaryFs;
+    }
+
+    /**
+     * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
+     * and read-through purposes.
+     *
+     * @param fileSystem Secondary file system.
+     */
+    public void setSecondaryFileSystem(IgfsSecondaryFileSystem fileSystem) {
+        secondaryFs = fileSystem;
+    }
+
+    /**
+     * Gets map of path prefixes to {@code IGFS} modes used for them.
+     * <p>
+     * If path doesn't correspond to any specified prefix or mappings are not provided, then
+     * {@link #getDefaultMode()} is used.
+     * <p>
+     * Several folders under the {@code '/apache/ignite'} folder have predefined mappings that cannot be overridden:
+     * <ul>
+     * <li>{@code /apache/ignite/primary} and all its sub-folders will always work in {@code PRIMARY} mode.</li>
+     * </ul>
+     * <p>
+     * And in case secondary file system URI is provided:
+     * <ul>
+     * <li>{@code /apache/ignite/proxy} and all its sub-folders will always work in {@code PROXY} mode.</li>
+     * <li>{@code /apache/ignite/sync} and all its sub-folders will always work in {@code DUAL_SYNC} mode.</li>
+     * <li>{@code /apache/ignite/async} and all its sub-folders will always work in {@code DUAL_ASYNC} mode.</li>
+     * </ul>
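+     * <p>
+     * For example, a sketch that pins {@code /tmp} and all its sub-folders to {@code PRIMARY} mode (the path is
+     * illustrative; {@code fsCfg} stands for a {@code FileSystemConfiguration} instance):
+     * <pre>
+     * Map&lt;String, IgfsMode&gt; pathModes = new HashMap&lt;&gt;();
+     *
+     * pathModes.put("/tmp", IgfsMode.PRIMARY);
+     *
+     * fsCfg.setPathModes(pathModes);
+     * </pre>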
+     *
+     * @return Map of paths to {@code IGFS} modes.
+     */
+    @Nullable public Map<String, IgfsMode> getPathModes() {
+        return pathModes;
+    }
+
+    /**
+     * Sets map of path prefixes to {@code IGFS} modes used for them.
+     * <p>
+     * If path doesn't correspond to any specified prefix or mappings are not provided, then
+     * {@link #getDefaultMode()} is used.
+     *
+     * @param pathModes Map of paths to {@code IGFS} modes.
+     */
+    public void setPathModes(Map<String, IgfsMode> pathModes) {
+        this.pathModes = pathModes;
+    }
+
+    /**
+     * Gets the length of file chunk to send before delaying the fragmentizer.
+     *
+     * @return File chunk length in bytes.
+     */
+    public long getFragmentizerThrottlingBlockLength() {
+        return fragmentizerThrottlingBlockLen;
+    }
+
+    /**
+     * Sets length of file chunk to transmit before throttling is delayed.
+     *
+     * @param fragmentizerThrottlingBlockLen Block length in bytes.
+     */
+    public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
+        this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
+    }
+
+    /**
+     * Gets throttle delay for fragmentizer.
+     *
+     * @return Throttle delay in milliseconds.
+     */
+    public long getFragmentizerThrottlingDelay() {
+        return fragmentizerThrottlingDelay;
+    }
+
+    /**
+     * Sets delay in milliseconds for which fragmentizer is paused.
+     *
+     * @param fragmentizerThrottlingDelay Delay in milliseconds.
+     */
+    public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
+        this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
+    }
+
+    /**
+     * Gets number of files that can be processed by fragmentizer concurrently.
+     *
+     * @return Number of files to process concurrently.
+     */
+    public int getFragmentizerConcurrentFiles() {
+        return fragmentizerConcurrentFiles;
+    }
+
+    /**
+     * Sets number of files to process concurrently by fragmentizer.
+     *
+     * @param fragmentizerConcurrentFiles Number of files to process concurrently.
+     */
+    public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
+        this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
+    }
+
+    /**
+     * Gets amount of local memory (in % of local IGFS max space size) available for local writes
+     * during file creation.
+     * <p>
+     * If current IGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize},
+     * then file blocks will be written to the local node first and then asynchronously distributed
+     * among cluster nodes (fragmentized).
+     * <p>
+     * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
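+     * <p>
+     * For example, with a maximum space size of 1Gb and the default ratio of {@code 0.8}, file blocks are written
+     * locally first while the current IGFS space size stays below {@code 0.8 * 1024Mb = 819.2Mb}.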
+     *
+     * @return Ratio for local writes space.
+     */
+    public float getFragmentizerLocalWritesRatio() {
+        return fragmentizerLocWritesRatio;
+    }
+
+    /**
+     * Sets ratio for space available for local file writes.
+     *
+     * @param fragmentizerLocWritesRatio Ratio for local file writes.
+     * @see #getFragmentizerLocalWritesRatio()
+     */
+    public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) {
+        this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
+    }
+
+    /**
+     * Gets flag indicating whether IGFS fragmentizer is enabled. If fragmentizer is disabled, files will be
+     * written in a distributed fashion.
+     *
+     * @return Flag indicating whether fragmentizer is enabled.
+     */
+    public boolean isFragmentizerEnabled() {
+        return fragmentizerEnabled;
+    }
+
+    /**
+     * Sets property indicating whether fragmentizer is enabled.
+     *
+     * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
+     */
+    public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
+        this.fragmentizerEnabled = fragmentizerEnabled;
+    }
+
+    /**
+     * Gets maximum space available for the data cache to store file system entries.
+     *
+     * @return Maximum space available for data cache.
+     */
+    public long getMaxSpaceSize() {
+        return maxSpace;
+    }
+
+    /**
+     * Sets maximum space in bytes available in the data cache.
+     *
+     * @param maxSpace Maximum space available in data cache.
+     */
+    public void setMaxSpaceSize(long maxSpace) {
+        this.maxSpace = maxSpace;
+    }
+
+    /**
+     * Gets maximum timeout to await trash purging when data cache oversize is detected.
+     *
+     * @return Maximum timeout to await trash purging when data cache oversize is detected.
+     */
+    public long getTrashPurgeTimeout() {
+        return trashPurgeTimeout;
+    }
+
+    /**
+     * Sets maximum timeout to await trash purging when data cache oversize is detected.
+     *
+     * @param trashPurgeTimeout Maximum timeout to await trash purging when data cache oversize is detected.
+     */
+    public void setTrashPurgeTimeout(long trashPurgeTimeout) {
+        this.trashPurgeTimeout = trashPurgeTimeout;
+    }
+
+    /**
+     * Gets DUAL mode put operation executor service. This executor service will process cache PUT requests for
+     * data that came from the secondary file system and is about to be written to the IGFS data cache.
+     * If no executor service is provided, a default one will be created with the maximum number of threads equal
+     * to the number of processor cores.
+     *
+     * @return DUAL mode put operation executor service.
+     */
+    @Nullable public ExecutorService getDualModePutExecutorService() {
+        return dualModePutExec;
+    }
+
+    /**
+     * Sets DUAL mode put operations executor service.
+     *
+     * @param dualModePutExec Dual mode put operations executor service.
+     */
+    public void setDualModePutExecutorService(ExecutorService dualModePutExec) {
+        this.dualModePutExec = dualModePutExec;
+    }
+
+    /**
+     * Gets DUAL mode put operation executor service shutdown flag.
+     *
+     * @return DUAL mode put operation executor service shutdown flag.
+     */
+    public boolean getDualModePutExecutorServiceShutdown() {
+        return dualModePutExecShutdown;
+    }
+
+    /**
+     * Sets DUAL mode put operations executor service shutdown flag.
+     *
+     * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag.
+     */
+    public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) {
+        this.dualModePutExecShutdown = dualModePutExecShutdown;
+    }
+
+    /**
+     * Gets maximum amount of pending data read from the secondary file system and waiting to be written to the
+     * data cache. {@code 0} or a negative value stands for unlimited size.
+     * <p>
+     * By default this value is set to {@code 0}. It is recommended to set a positive value in case your
+     * application performs frequent reads of large amounts of data from the secondary file system, in order to
+     * avoid issues with increasing GC pauses or out-of-memory errors.
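+     * <p>
+     * For example, an illustrative 128Mb limit ({@code fsCfg} stands for a {@code FileSystemConfiguration}
+     * instance):
+     * <pre>
+     * fsCfg.setDualModeMaxPendingPutsSize(128 * 1024 * 1024);
+     * </pre>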
+     *
+     * @return Maximum amount of pending data read from the secondary file system.
+     */
+    public long getDualModeMaxPendingPutsSize() {
+        return dualModeMaxPendingPutsSize;
+    }
+
+    /**
+     * Sets maximum amount of data in pending put operations.
+     *
+     * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations.
+     */
+    public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) {
+        this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
+    }
+
+    /**
+     * Gets maximum default range size of a file being split during IGFS task execution. When an IGFS task is about
+     * to be executed, it requests file block locations first. Each location is defined as an
+     * {@link org.apache.ignite.igfs.mapreduce.IgfsFileRange} which has a length. In case this parameter is set to a
+     * positive value, IGFS will split a single file range into smaller ranges with length not greater than this
+     * parameter. The only exception to this case is when maximum task range length is smaller than file block size.
+     * In this case maximum task range size will be overridden and set to file block size.
+     * <p>
+     * Note that this parameter is applied when a task is split into jobs before
+     * {@link org.apache.ignite.igfs.mapreduce.IgfsRecordResolver} is applied. Therefore, final file ranges assigned
+     * to particular jobs could be greater than the value of this parameter, depending on file data layout and
+     * selected resolver type.
+     * <p>
+     * Setting this parameter might be useful when a file is highly colocated and has very long consecutive data
+     * chunks, so that task execution suffers from insufficient parallelism. E.g., in case you have one IGFS node in
+     * topology and want to process a 1Gb file, then only a single range of length 1Gb will be returned. This will
+     * result in a single job which will be processed in one thread. But if you set the maximum range length to
+     * 16Mb, then 64 ranges will be returned, resulting in 64 jobs which could be executed in parallel.
+     * <p>
+     * Note that some {@code IgniteFs.execute()} methods can override the value of this parameter.
+     * <p>
+     * If this parameter is set to {@code 0} or a negative value, it is simply ignored. Default value is
+     * {@code 0}.
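+     * <p>
+     * For example, the 16Mb maximum range length from the scenario above ({@code fsCfg} stands for a
+     * {@code FileSystemConfiguration} instance):
+     * <pre>
+     * fsCfg.setMaximumTaskRangeLength(16 * 1024 * 1024);
+     * </pre>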
+     *
+     * @return Maximum range size of a file being split during IGFS task execution.
+     */
+    public long getMaximumTaskRangeLength() {
+        return maxTaskRangeLen;
+    }
+
+    /**
+     * Sets maximum default range size of a file being split during IGFS task execution.
+     * See {@link #getMaximumTaskRangeLength()} for more details.
+     *
+     * @param maxTaskRangeLen Maximum default range size of a file being split during IGFS task execution.
+     */
+    public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
+        this.maxTaskRangeLen = maxTaskRangeLen;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(FileSystemConfiguration.class, this);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
new file mode 100644
index 0000000..01ef8b0
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.configuration;
+
+import org.apache.ignite.internal.processors.hadoop.*;
+import org.apache.ignite.internal.util.typedef.internal.*;
+
+/**
+ * Ignite Hadoop Accelerator configuration.
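+ * <p>
+ * A minimal configuration sketch is shown below (it assumes the matching
+ * {@code IgniteConfiguration#setHadoopConfiguration(...)} mutator on the node configuration):
+ * <pre>
+ * HadoopConfiguration hadoopCfg = new HadoopConfiguration();
+ *
+ * hadoopCfg.setMaxParallelTasks(4);
+ * hadoopCfg.setFinishedJobInfoTtl(30_000);
+ *
+ * IgniteConfiguration cfg = new IgniteConfiguration();
+ *
+ * cfg.setHadoopConfiguration(hadoopCfg);
+ * </pre>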
+ */
+public class HadoopConfiguration {
+    /** Default finished job info time-to-live. */
+    public static final long DFLT_FINISHED_JOB_INFO_TTL = 10_000;
+
+    /** Default value for external execution flag. */
+    public static final boolean DFLT_EXTERNAL_EXECUTION = false;
+
+    /** Default value for the max parallel tasks. */
+    public static final int DFLT_MAX_PARALLEL_TASKS = Runtime.getRuntime().availableProcessors();
+
+    /** Default value for the max task queue size. */
+    public static final int DFLT_MAX_TASK_QUEUE_SIZE = 1000;
+
+    /** Map reduce planner. */
+    private HadoopMapReducePlanner planner;
+
+    /** */
+    private boolean extExecution = DFLT_EXTERNAL_EXECUTION;
+
+    /** Finished job info TTL. */
+    private long finishedJobInfoTtl = DFLT_FINISHED_JOB_INFO_TTL;
+
+    /** */
+    private int maxParallelTasks = DFLT_MAX_PARALLEL_TASKS;
+
+    /** */
+    private int maxTaskQueueSize = DFLT_MAX_TASK_QUEUE_SIZE;
+
+    /**
+     * Default constructor.
+     */
+    public HadoopConfiguration() {
+        // No-op.
+    }
+
+    /**
+     * Copy constructor.
+     *
+     * @param cfg Configuration to copy.
+     */
+    public HadoopConfiguration(HadoopConfiguration cfg) {
+        // Preserve alphabetic order.
+        extExecution = cfg.isExternalExecution();
+        finishedJobInfoTtl = cfg.getFinishedJobInfoTtl();
+        planner = cfg.getMapReducePlanner();
+        maxParallelTasks = cfg.getMaxParallelTasks();
+        maxTaskQueueSize = cfg.getMaxTaskQueueSize();
+    }
+
+    /**
+     * Gets max number of local tasks that may be executed in parallel.
+     *
+     * @return Max number of local tasks that may be executed in parallel.
+     */
+    public int getMaxParallelTasks() {
+        return maxParallelTasks;
+    }
+
+    /**
+     * Sets max number of local tasks that may be executed in parallel.
+     *
+     * @param maxParallelTasks Max number of local tasks that may be executed in parallel.
+     */
+    public void setMaxParallelTasks(int maxParallelTasks) {
+        this.maxParallelTasks = maxParallelTasks;
+    }
+
+    /**
+     * Gets max task queue size.
+     *
+     * @return Max task queue size.
+     */
+    public int getMaxTaskQueueSize() {
+        return maxTaskQueueSize;
+    }
+
+    /**
+     * Sets max task queue size.
+     *
+     * @param maxTaskQueueSize Max task queue size.
+     */
+    public void setMaxTaskQueueSize(int maxTaskQueueSize) {
+        this.maxTaskQueueSize = maxTaskQueueSize;
+    }
+
+    /**
+     * Gets finished job info time-to-live in milliseconds.
+     *
+     * @return Finished job info time-to-live.
+     */
+    public long getFinishedJobInfoTtl() {
+        return finishedJobInfoTtl;
+    }
+
+    /**
+     * Sets finished job info time-to-live.
+     *
+     * @param finishedJobInfoTtl Finished job info time-to-live.
+     */
+    public void setFinishedJobInfoTtl(long finishedJobInfoTtl) {
+        this.finishedJobInfoTtl = finishedJobInfoTtl;
+    }
+
+    /**
+     * Gets external task execution flag. If {@code true}, Hadoop job tasks will be executed in an external
+     * (relative to the node) process.
+     *
+     * @return {@code True} if external execution.
+     */
+    public boolean isExternalExecution() {
+        return extExecution;
+    }
+
+    /**
+     * Sets external task execution flag.
+     *
+     * @param extExecution {@code True} if tasks should be executed in an external process.
+     * @see #isExternalExecution()
+     */
+    public void setExternalExecution(boolean extExecution) {
+        this.extExecution = extExecution;
+    }
+
+    /**
+     * Gets Hadoop map-reduce planner, a component which defines job execution plan based on job
+     * configuration and current grid topology.
+     *
+     * @return Map-reduce planner.
+     */
+    public HadoopMapReducePlanner getMapReducePlanner() {
+        return planner;
+    }
+
+    /**
+     * Sets Hadoop map-reduce planner, a component which defines job execution plan based on job
+     * configuration and current grid topology.
+     *
+     * @param planner Map-reduce planner.
+     */
+    public void setMapReducePlanner(HadoopMapReducePlanner planner) {
+        this.planner = planner;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopConfiguration.class, this, super.toString());
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
deleted file mode 100644
index 308471d..0000000
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
+++ /dev/null
@@ -1,807 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.configuration;
-
-import org.apache.ignite.igfs.*;
-import org.apache.ignite.internal.util.typedef.internal.*;
-import org.jetbrains.annotations.*;
-
-import java.util.*;
-import java.util.concurrent.*;
-
-/**
- * {@code IGFS} configuration. More than one file system can be configured within grid.
- * {@code IGFS} configuration is provided via {@link org.apache.ignite.configuration.IgniteConfiguration#getIgfsConfiguration()}
- * method.
- * <p>
- * Refer to {@code config/hadoop/default-config.xml} or {@code config/hadoop/default-config-client.xml}
- * configuration files under Ignite installation to see sample {@code IGFS} configuration.
- */
-public class IgfsConfiguration {
-    /** Default file system user name. */
-    public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
-
-    /** Default IPC port. */
-    public static final int DFLT_IPC_PORT = 10500;
-
-    /** Default fragmentizer throttling block length. */
-    public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024;
-
-    /** Default fragmentizer throttling delay. */
-    public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
-
-    /** Default fragmentizer concurrent files. */
-    public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
-
-    /** Default fragmentizer local writes ratio. */
-    public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
-
-    /** Fragmentizer enabled property. */
-    public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
-
-    /** Default batch size for logging. */
-    public static final int DFLT_IGFS_LOG_BATCH_SIZE = 100;
-
-    /** Default {@code IGFS} log directory. */
-    public static final String DFLT_IGFS_LOG_DIR = "work/igfs/log";
-
-    /** Default per node buffer size. */
-    public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
-
-    /** Default number of per node parallel operations. */
-    public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
-
-    /** Default IGFS mode. */
-    public static final IgfsMode DFLT_MODE = IgfsMode.DUAL_ASYNC;
-
-    /** Default file's data block size (bytes). */
-    public static final int DFLT_BLOCK_SIZE = 1 << 16;
-
-    /** Default read/write buffers size (bytes). */
-    public static final int DFLT_BUF_SIZE = 1 << 16;
-
-    /** Default trash directory purge await timeout in case data cache oversize is detected. */
-    public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
-
-    /** Default management port. */
-    public static final int DFLT_MGMT_PORT = 11400;
-
-    /** Default IPC endpoint enabled flag. */
-    public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
-
-    /** IGFS instance name. */
-    private String name;
-
-    /** Cache name to store IGFS meta information. */
-    private String metaCacheName;
-
-    /** Cache name to store file's data blocks. */
-    private String dataCacheName;
-
-    /** File's data block size (bytes). */
-    private int blockSize = DFLT_BLOCK_SIZE;
-
-    /** The number of pre-fetched blocks if specific file's chunk is requested. */
-    private int prefetchBlocks;
-
-    /** Amount of sequential block reads before prefetch is triggered. */
-    private int seqReadsBeforePrefetch;
-
-    /** Read/write buffers size for stream operations (bytes). */
-    private int bufSize = DFLT_BUF_SIZE;
-
-    /** Per node buffer size. */
-    private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
-
-    /** Per node parallel operations. */
-    private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
-
-    /** IPC endpoint properties to publish IGFS over. */
-    private Map<String, String> ipcEndpointCfg;
-
-    /** IPC endpoint enabled flag. */
-    private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
-
-    /** Management port. */
-    private int mgmtPort = DFLT_MGMT_PORT;
-
-    /** Secondary file system */
-    private Igfs secondaryFs;
-
-    /** IGFS mode. */
-    private IgfsMode dfltMode = DFLT_MODE;
-
-    /** Fragmentizer throttling block length. */
-    private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
-
-    /** Fragmentizer throttling delay. */
-    private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY;
-
-    /** Fragmentizer concurrent files. */
-    private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES;
-
-    /** Fragmentizer local writes ratio. */
-    private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
-
-    /** Fragmentizer enabled flag. */
-    private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
-
-    /** Path modes. */
-    private Map<String, IgfsMode> pathModes;
-
-    /** Maximum space. */
-    private long maxSpace;
-
-    /** Trash purge await timeout. */
-    private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
-
-    /** Dual mode PUT operations executor service. */
-    private ExecutorService dualModePutExec;
-
-    /** Dual mode PUT operations executor service shutdown flag. */
-    private boolean dualModePutExecShutdown;
-
-    /** Maximum amount of data in pending puts. */
-    private long dualModeMaxPendingPutsSize;
-
-    /** Maximum range length. */
-    private long maxTaskRangeLen;
-
-    /**
-     * Constructs default configuration.
-     */
-    public IgfsConfiguration() {
-        // No-op.
-    }
-
-    /**
-     * Constructs the copy of the configuration.
-     *
-     * @param cfg Configuration to copy.
-     */
-    public IgfsConfiguration(IgfsConfiguration cfg) {
-        assert cfg != null;
-
-        /*
-         * Must preserve alphabetical order!
-         */
-        blockSize = cfg.getBlockSize();
-        bufSize = cfg.getStreamBufferSize();
-        dataCacheName = cfg.getDataCacheName();
-        dfltMode = cfg.getDefaultMode();
-        dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
-        dualModePutExec = cfg.getDualModePutExecutorService();
-        dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
-        fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
-        fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
-        fragmentizerEnabled = cfg.isFragmentizerEnabled();
-        fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength();
-        fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
-        secondaryFs = cfg.getSecondaryFileSystem();
-        ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
-        ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
-        maxSpace = cfg.getMaxSpaceSize();
-        maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
-        metaCacheName = cfg.getMetaCacheName();
-        mgmtPort = cfg.getManagementPort();
-        name = cfg.getName();
-        pathModes = cfg.getPathModes();
-        perNodeBatchSize = cfg.getPerNodeBatchSize();
-        perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
-        prefetchBlocks = cfg.getPrefetchBlocks();
-        seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
-        trashPurgeTimeout = cfg.getTrashPurgeTimeout();
-    }
-
-    /**
-     * Gets IGFS instance name. If {@code null}, then instance with default
-     * name will be used.
-     *
-     * @return IGFS instance name.
-     */
-    @Nullable public String getName() {
-        return name;
-    }
-
-    /**
-     * Sets IGFS instance name.
-     *
-     * @param name IGFS instance name.
-     */
-    public void setName(String name) {
-        this.name = name;
-    }
-
-    /**
-     * Cache name to store IGFS meta information. If {@code null}, then instance
-     * with default meta-cache name will be used.
-     *
-     * @return Cache name to store IGFS meta information.
-     */
-    @Nullable public String getMetaCacheName() {
-        return metaCacheName;
-    }
-
-    /**
-     * Sets cache name to store IGFS meta information.
-     *
-     * @param metaCacheName Cache name to store IGFS meta information.
-     */
-    public void setMetaCacheName(String metaCacheName) {
-        this.metaCacheName = metaCacheName;
-    }
-
-    /**
-     * Cache name to store IGFS data.
-     *
-     * @return Cache name to store IGFS data.
-     */
-    @Nullable public String getDataCacheName() {
-        return dataCacheName;
-    }
-
-    /**
-     * Sets cache name to store IGFS data.
-     *
-     * @param dataCacheName Cache name to store IGFS data.
-     */
-    public void setDataCacheName(String dataCacheName) {
-        this.dataCacheName = dataCacheName;
-    }
-
-    /**
-     * Get file's data block size.
-     *
-     * @return File's data block size.
-     */
-    public int getBlockSize() {
-        return blockSize;
-    }
-
-    /**
-     * Sets file's data block size.
-     *
-     * @param blockSize File's data block size (bytes) or {@code 0} to reset default value.
-     */
-    public void setBlockSize(int blockSize) {
-        A.ensure(blockSize >= 0, "blockSize >= 0");
-
-        this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
-    }
-
-    /**
-     * Get number of pre-fetched blocks if specific file's chunk is requested.
-     *
-     * @return The number of pre-fetched blocks.
-     */
-    public int getPrefetchBlocks() {
-        return prefetchBlocks;
-    }
-
-    /**
-     * Sets the number of pre-fetched blocks if specific file's chunk is requested.
-     *
-     * @param prefetchBlocks New number of pre-fetched blocks.
-     */
-    public void setPrefetchBlocks(int prefetchBlocks) {
-        A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
-
-        this.prefetchBlocks = prefetchBlocks;
-    }
-
-    /**
-     * Get amount of sequential block reads before prefetch is triggered. The
-     * higher this value, the longer IGFS will wait before starting to prefetch
-     * values ahead of time. Depending on the use case, this can either help
-     * or hurt performance.
-     * <p>
-     * Default is {@code 0} which means that pre-fetching will start right away.
-     * <h1 class="header">Integration With Hadoop</h1>
-     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH}
-     * configuration property directly to Hadoop MapReduce task.
-     * <p>
-     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
-     *
-     * @return Amount of sequential block reads.
-     */
-    public int getSequentialReadsBeforePrefetch() {
-        return seqReadsBeforePrefetch;
-    }
-
-    /**
-     * Sets amount of sequential block reads before prefetch is triggered. The
-     * higher this value, the longer IGFS will wait before starting to prefetch
-     * values ahead of time. Depending on the use case, this can either help
-     * or hurt performance.
-     * <p>
-     * Default is {@code 0} which means that pre-fetching will start right away.
-     * <h1 class="header">Integration With Hadoop</h1>
-     * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH}
-     * configuration property directly to Hadoop MapReduce task.
-     * <p>
-     * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
-     *
-     * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered.
-     */
-    public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
-        A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
-
-        this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
-    }
-
-    /**
-     * Get read/write buffer size for {@code IGFS} stream operations in bytes.
-     *
-     * @return Read/write buffers size (bytes).
-     */
-    public int getStreamBufferSize() {
-        return bufSize;
-    }
-
-    /**
-     * Sets read/write buffers size for {@code IGFS} stream operations (bytes).
-     *
-     * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value.
-     */
-    public void setStreamBufferSize(int bufSize) {
-        A.ensure(bufSize >= 0, "bufSize >= 0");
-
-        this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
-    }
-
-    /**
-     * Gets number of file blocks buffered on local node before sending batch to remote node.
-     *
-     * @return Per node buffer size.
-     */
-    public int getPerNodeBatchSize() {
-        return perNodeBatchSize;
-    }
-
-    /**
-     * Sets number of file blocks collected on local node before sending batch to remote node.
-     *
-     * @param perNodeBatchSize Per node buffer size.
-     */
-    public void setPerNodeBatchSize(int perNodeBatchSize) {
-        this.perNodeBatchSize = perNodeBatchSize;
-    }
-
-    /**
-     * Gets number of batches that can be concurrently sent to remote node.
-     *
-     * @return Number of batches for each node.
-     */
-    public int getPerNodeParallelBatchCount() {
-        return perNodeParallelBatchCnt;
-    }
-
-    /**
-     * Sets number of file block batches that can be concurrently sent to remote node.
-     *
-     * @param perNodeParallelBatchCnt Per node parallel load operations.
-     */
-    public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
-        this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
-    }
-
-    /**
-     * Gets map of IPC endpoint configuration properties. There are 2 different
-     * types of endpoint supported: {@code shared-memory}, and {@code TCP}.
-     * <p>
-     * The following configuration properties are supported for {@code shared-memory}
-     * endpoint:
-     * <ul>
-     *     <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li>
-     *     <li>{@code port} - endpoint port.</li>
-     *     <li>{@code size} - memory size allocated for single endpoint communication.</li>
-     *     <li>
-     *         {@code tokenDirectoryPath} - path, either absolute or relative to {@code IGNITE_HOME} to
-     *         store shared memory tokens.
-     *     </li>
-     * </ul>
-     * <p>
-     * The following configuration properties are supported for {@code TCP} approach:
-     * <ul>
-     *     <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li>
-     *     <li>{@code port} - endpoint bind port.</li>
-     *     <li>
-     *         {@code host} - endpoint bind host. If omitted '127.0.0.1' will be used.
-     *     </li>
-     * </ul>
-     * <p>
-     * Note that {@code shared-memory} approach is not supported on Windows environments.
-     * In case IGFS is failed to bind to particular port, further attempts will be performed every 3 seconds.
-     *
-     * @return Map of IPC endpoint configuration properties. In case the value is not set, defaults will be used. Default
-     * type for Windows is "tcp", for all other platforms - "shmem". Default port is {@link #DFLT_IPC_PORT}.
-     */
-    @Nullable public Map<String,String> getIpcEndpointConfiguration() {
-        return ipcEndpointCfg;
-    }
-
-    /**
-     * Sets IPC endpoint configuration to publish IGFS over.
-     *
-     * @param ipcEndpointCfg Map of IPC endpoint config properties.
-     */
-    public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) {
-        this.ipcEndpointCfg = ipcEndpointCfg;
-    }
-
-    /**
-     * Get IPC endpoint enabled flag. In case it is set to {@code true} endpoint will be created and bound to specific
-     * port. Otherwise endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}.
-     *
-     * @return {@code True} in case endpoint is enabled.
-     */
-    public boolean isIpcEndpointEnabled() {
-        return ipcEndpointEnabled;
-    }
-
-    /**
-     * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
-     *
-     * @param ipcEndpointEnabled IPC endpoint enabled flag.
-     */
-    public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
-        this.ipcEndpointEnabled = ipcEndpointEnabled;
-    }
-
-    /**
-     * Gets port number for management endpoint. All IGFS nodes should have this port open
-     * for Visor Management Console to work with IGFS.
-     * <p>
-     * Default value is {@link #DFLT_MGMT_PORT}
-     *
-     * @return Port number or {@code -1} if management endpoint should be disabled.
-     */
-    public int getManagementPort() {
-        return mgmtPort;
-    }
-
-    /**
-     * Sets management endpoint port.
-     *
-     * @param mgmtPort port number or {@code -1} to disable management endpoint.
-     */
-    public void setManagementPort(int mgmtPort) {
-        this.mgmtPort = mgmtPort;
-    }
-
-    /**
-     * Gets mode to specify how {@code IGFS} interacts with Hadoop file system, like {@code HDFS}.
-     * Secondary Hadoop file system is provided for pass-through, write-through, and read-through
-     * purposes.
-     * <p>
-     * Default mode is {@link org.apache.ignite.igfs.IgfsMode#DUAL_ASYNC}. If secondary Hadoop file system is
-     * not configured, this mode will work just like {@link org.apache.ignite.igfs.IgfsMode#PRIMARY} mode.
-     *
-     * @return Mode to specify how IGFS interacts with secondary HDFS file system.
-     */
-    public IgfsMode getDefaultMode() {
-        return dfltMode;
-    }
-
-    /**
-     * Sets {@code IGFS} mode to specify how it should interact with secondary
-     * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided
-     * for pass-through, write-through, and read-through purposes.
-     *
-     * @param dfltMode {@code IGFS} mode.
-     */
-    public void setDefaultMode(IgfsMode dfltMode) {
-        this.dfltMode = dfltMode;
-    }
-
-    /**
-     * Gets the secondary file system. Secondary file system is provided for pass-through, write-through,
-     * and read-through purposes.
-     *
-     * @return Secondary file system.
-     */
-    public Igfs getSecondaryFileSystem() {
-        return secondaryFs;
-    }
-
-    /**
-     * Sets the secondary file system. Secondary file system is provided for pass-through, write-through,
-     * and read-through purposes.
-     *
-     * @param fileSystem Secondary file system.
-     */
-    public void setSecondaryFileSystem(Igfs fileSystem) {
-        secondaryFs = fileSystem;
-    }
-
-    /**
-     * Gets map of path prefixes to {@code IGFS} modes used for them.
-     * <p>
-     * If path doesn't correspond to any specified prefix or mappings are not provided, then
-     * {@link #getDefaultMode()} is used.
-     * <p>
-     * Several folders under the {@code '/apache/ignite'} folder have predefined mappings which cannot be overridden:
-     * <ul>
-     * <li>{@code /apache/ignite/primary} and all its sub-folders will always work in {@code PRIMARY} mode.</li>
-     * </ul>
-     * <p>
-     * And in case a secondary file system URI is provided:
-     * <ul>
-     * <li>{@code /apache/ignite/proxy} and all its sub-folders will always work in {@code PROXY} mode.</li>
-     * <li>{@code /apache/ignite/sync} and all its sub-folders will always work in {@code DUAL_SYNC} mode.</li>
-     * <li>{@code /apache/ignite/async} and all its sub-folders will always work in {@code DUAL_ASYNC} mode.</li>
-     * </ul>
-     *
-     * @return Map of paths to {@code IGFS} modes.
-     */
-    @Nullable public Map<String, IgfsMode> getPathModes() {
-        return pathModes;
-    }
-
-    /**
-     * Sets map of path prefixes to {@code IGFS} modes used for them.
-     * <p>
-     * If path doesn't correspond to any specified prefix or mappings are not provided, then
-     * {@link #getDefaultMode()} is used.
-     *
-     * @param pathModes Map of paths to {@code IGFS} modes.
-     */
-    public void setPathModes(Map<String, IgfsMode> pathModes) {
-        this.pathModes = pathModes;
-    }
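
Continuing the hedged sketch above, the default mode and per-path overrides could be combined as follows (the path prefixes are hypothetical):

    import org.apache.ignite.igfs.IgfsMode;

    igfsCfg.setDefaultMode(IgfsMode.DUAL_ASYNC);

    // Scratch data stays in memory only; archived data passes straight
    // through to the secondary file system.
    Map<String, IgfsMode> pathModes = new HashMap<>();

    pathModes.put("/tmp/", IgfsMode.PRIMARY);
    pathModes.put("/archive/", IgfsMode.PROXY);

    igfsCfg.setPathModes(pathModes);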
-
-    /**
-     * Gets the length of file chunk to send before delaying the fragmentizer.
-     *
-     * @return File chunk length in bytes.
-     */
-    public long getFragmentizerThrottlingBlockLength() {
-        return fragmentizerThrottlingBlockLen;
-    }
-
-    /**
-     * Sets the length of a file chunk to transmit before the fragmentizer is delayed.
-     *
-     * @param fragmentizerThrottlingBlockLen Block length in bytes.
-     */
-    public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) {
-        this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
-    }
-
-    /**
-     * Gets throttle delay for fragmentizer.
-     *
-     * @return Throttle delay in milliseconds.
-     */
-    public long getFragmentizerThrottlingDelay() {
-        return fragmentizerThrottlingDelay;
-    }
-
-    /**
-     * Sets delay in milliseconds for which fragmentizer is paused.
-     *
-     * @param fragmentizerThrottlingDelay Delay in milliseconds.
-     */
-    public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) {
-        this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
-    }
-
-    /**
-     * Gets number of files that can be processed by fragmentizer concurrently.
-     *
-     * @return Number of files to process concurrently.
-     */
-    public int getFragmentizerConcurrentFiles() {
-        return fragmentizerConcurrentFiles;
-    }
-
-    /**
-     * Sets number of files to process concurrently by fragmentizer.
-     *
-     * @param fragmentizerConcurrentFiles Number of files to process concurrently.
-     */
-    public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) {
-        this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
-    }
-
-    /**
-     * Gets amount of local memory (in % of local IGFS max space size) available for local writes
-     * during file creation.
-     * <p>
-     * If current IGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize},
-     * then file blocks will be written to the local node first and then asynchronously distributed
-     * among cluster nodes (fragmentized).
-     * <p>
-     * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
-     *
-     * @return Ratio for local writes space.
-     */
-    public float getFragmentizerLocalWritesRatio() {
-        return fragmentizerLocWritesRatio;
-    }
-
-    /**
-     * Sets ratio for space available for local file writes.
-     *
-     * @param fragmentizerLocWritesRatio Ratio for local file writes.
-     * @see #getFragmentizerLocalWritesRatio()
-     */
-    public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) {
-        this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
-    }
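
As a worked example of the threshold described above (all numbers illustrative):

    long maxSpaceSize = 1024L * 1024 * 1024; // 1 GB, a hypothetical setting.
    float ratio = 0.8f;                      // DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO.

    // While the current IGFS space size stays below roughly 819 MB (0.8 * 1 GB),
    // new file blocks are written to the local node first and then fragmentized
    // (redistributed across the cluster) asynchronously.
    long localWritesThreshold = (long)(ratio * maxSpaceSize);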
-
-    /**
-     * Gets flag indicating whether the IGFS fragmentizer is enabled. If the fragmentizer is disabled, files will be
-     * written in a distributed fashion.
-     *
-     * @return Flag indicating whether fragmentizer is enabled.
-     */
-    public boolean isFragmentizerEnabled() {
-        return fragmentizerEnabled;
-    }
-
-    /**
-     * Sets property indicating whether fragmentizer is enabled.
-     *
-     * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
-     */
-    public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
-        this.fragmentizerEnabled = fragmentizerEnabled;
-    }
-
-    /**
-     * Get maximum space available for data cache to store file system entries.
-     *
-     * @return Maximum space available for data cache.
-     */
-    public long getMaxSpaceSize() {
-        return maxSpace;
-    }
-
-    /**
-     * Set maximum space in bytes available in data cache.
-     *
-     * @param maxSpace Maximum space available in data cache.
-     */
-    public void setMaxSpaceSize(long maxSpace) {
-        this.maxSpace = maxSpace;
-    }
-
-    /**
-     * Gets the maximum timeout to await trash purging when data cache oversize is detected.
-     *
-     * @return Maximum timeout to await trash purging when data cache oversize is detected.
-     */
-    public long getTrashPurgeTimeout() {
-        return trashPurgeTimeout;
-    }
-
-    /**
-     * Sets the maximum timeout to await trash purging when data cache oversize is detected.
-     *
-     * @param trashPurgeTimeout Maximum timeout to await trash purging when data cache oversize is detected.
-     */
-    public void setTrashPurgeTimeout(long trashPurgeTimeout) {
-        this.trashPurgeTimeout = trashPurgeTimeout;
-    }
-
-    /**
-     * Get DUAL mode put operation executor service. This executor service will process cache PUT requests for
-     * data which came from the secondary file system and is about to be written to the IGFS data cache.
-     * If no executor service is provided, a default one will be created with the maximum number of threads
-     * equal to the number of processor cores.
-     *
-     * @return DUAL mode put operation executor service.
-     */
-    @Nullable public ExecutorService getDualModePutExecutorService() {
-        return dualModePutExec;
-    }
-
-    /**
-     * Set DUAL mode put operations executor service.
-     *
-     * @param dualModePutExec Dual mode put operations executor service.
-     */
-    public void setDualModePutExecutorService(ExecutorService dualModePutExec) {
-        this.dualModePutExec = dualModePutExec;
-    }
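
A hedged sketch of supplying this executor (the pool size mirrors the documented default but is under application control; the shutdown flag used here is documented just below):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    ExecutorService dualModePutExec =
        Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());

    igfsCfg.setDualModePutExecutorService(dualModePutExec);
    igfsCfg.setDualModePutExecutorServiceShutdown(true); // Let IGFS stop the pool.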
-
-    /**
-     * Get DUAL mode put operation executor service shutdown flag.
-     *
-     * @return DUAL mode put operation executor service shutdown flag.
-     */
-    public boolean getDualModePutExecutorServiceShutdown() {
-        return dualModePutExecShutdown;
-    }
-
-    /**
-     * Set DUAL mode put operations executor service shutdown flag.
-     *
-     * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag.
-     */
-    public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) {
-        this.dualModePutExecShutdown = dualModePutExecShutdown;
-    }
-
-    /**
-     * Get maximum amount of pending data read from the secondary file system and waiting to be written to the
-     * data cache. {@code 0} or a negative value stands for unlimited size.
-     * <p>
-     * By default this value is set to {@code 0}. It is recommended to set a positive value if your
-     * application performs frequent reads of large amounts of data from the secondary file system, in order to
-     * avoid increasing GC pauses or out-of-memory errors.
-     *
-     * @return Maximum amount of pending data read from the secondary file system.
-     */
-    public long getDualModeMaxPendingPutsSize() {
-        return dualModeMaxPendingPutsSize;
-    }
-
-    /**
-     * Set maximum amount of data in pending put operations.
-     *
-     * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations.
-     */
-    public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) {
-        this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
-    }
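
For instance, the recommendation above could look like this (the 128 MB cap is an arbitrary illustration):

    // Bound the heap held by pending DUAL mode puts so that heavy read-through
    // traffic from the secondary file system cannot exhaust memory.
    igfsCfg.setDualModeMaxPendingPutsSize(128L * 1024 * 1024);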
-
-    /**
-     * Get maximum default range size of a file being split during IGFS task execution. When an IGFS task is about to
-     * be executed, it requests file block locations first. Each location is defined as {@link org.apache.ignite.igfs.mapreduce.IgfsFileRange} which
-     * has a length. If this parameter is set to a positive value, IGFS will split a single file range into smaller
-     * ranges with length not greater than this parameter. The only exception to this case is when the maximum task range
-     * length is smaller than the file block size. In this case the maximum task range size will be overridden and set to the
-     * file block size.
-     * <p>
-     * Note that this parameter is applied when a task is split into jobs, before {@link org.apache.ignite.igfs.mapreduce.IgfsRecordResolver} is
-     * applied. Therefore, the final file ranges assigned to particular jobs could be greater than the value of this
-     * parameter, depending on file data layout and the selected resolver type.
-     * <p>
-     * Setting this parameter might be useful when a file is highly colocated and has very long consecutive data chunks,
-     * so that task execution suffers from insufficient parallelism. E.g., if you have one IGFS node in the topology
-     * and want to process a 1GB file, then only a single range of length 1GB will be returned. This will result in
-     * a single job which will be processed in one thread. But if you set the maximum range length to 16MB, then
-     * 64 ranges will be returned, resulting in 64 jobs which can be executed in parallel.
-     * <p>
-     * Note that some {@code IgniteFs.execute()} methods can override the value of this parameter.
-     * <p>
-     * If the value of this parameter is {@code 0} or negative, it is simply ignored. The default value is
-     * {@code 0}.
-     *
-     * @return Maximum range size of a file being split during IGFS task execution.
-     */
-    public long getMaximumTaskRangeLength() {
-        return maxTaskRangeLen;
-    }
-
-    /**
-     * Set maximum default range size of a file being split during IGFS task execution.
-     * See {@link #getMaximumTaskRangeLength()} for more details.
-     *
-     * @param maxTaskRangeLen Maximum default range size of a file being split during IGFS task execution.
-     */
-    public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
-        this.maxTaskRangeLen = maxTaskRangeLen;
-    }
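
A worked instance of the 1GB example above, expressed as a hedged sketch:

    long fileLen = 1024L * 1024 * 1024;       // 1 GB file on a single IGFS node.
    long maxTaskRangeLen = 16L * 1024 * 1024; // 16 MB maximum range length.

    // Without the limit: one 1 GB range -> one job -> one thread.
    // With the limit: 1 GB / 16 MB = 64 ranges -> up to 64 parallel jobs.
    long ranges = fileLen / maxTaskRangeLen;  // 64.

    igfsCfg.setMaximumTaskRangeLength(maxTaskRangeLen);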
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgfsConfiguration.class, this);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
index cf88778..8bd2f83 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
@@ -20,7 +20,6 @@ package org.apache.ignite.configuration;
 import org.apache.ignite.*;
 import org.apache.ignite.events.*;
 import org.apache.ignite.internal.managers.eventstorage.*;
-import org.apache.ignite.internal.processors.hadoop.*;
 import org.apache.ignite.internal.util.typedef.internal.*;
 import org.apache.ignite.lang.*;
 import org.apache.ignite.lifecycle.*;
@@ -345,7 +344,7 @@ public class IgniteConfiguration {
     private Map<IgnitePredicate<? extends Event>, int[]> lsnrs;
 
     /** IGFS configuration. */
-    private IgfsConfiguration[] igfsCfg;
+    private FileSystemConfiguration[] igfsCfg;
 
     /** Streamer configuration. */
     private StreamerConfiguration[] streamerCfg;
@@ -354,7 +353,7 @@ public class IgniteConfiguration {
     private ServiceConfiguration[] svcCfgs;
 
     /** Hadoop configuration. */
-    private GridHadoopConfiguration hadoopCfg;
+    private HadoopConfiguration hadoopCfg;
 
     /** Client access configuration. */
     private ConnectorConfiguration connectorCfg = new ConnectorConfiguration();
@@ -418,7 +417,7 @@ public class IgniteConfiguration {
         ggHome = cfg.getIgniteHome();
         ggWork = cfg.getWorkDirectory();
         gridName = cfg.getGridName();
-        igfsCfg = cfg.getIgfsConfiguration();
+        igfsCfg = cfg.getFileSystemConfiguration();
         igfsPoolSize = cfg.getIgfsThreadPoolSize();
         hadoopCfg = cfg.getHadoopConfiguration();
         inclEvtTypes = cfg.getIncludeEventTypes();
@@ -1728,20 +1727,20 @@ public class IgniteConfiguration {
     }
 
     /**
-     * Gets IGFS configurations.
+     * Gets IGFS (Ignite In-Memory File System) configurations.
      *
      * @return IGFS configurations.
      */
-    public IgfsConfiguration[] getIgfsConfiguration() {
+    public FileSystemConfiguration[] getFileSystemConfiguration() {
         return igfsCfg;
     }
 
     /**
-     * Sets IGFS configurations.
+     * Sets IGFS (Ignite In-Memory File System) configurations.
      *
      * @param igfsCfg IGFS configurations.
      */
-    public void setIgfsConfiguration(IgfsConfiguration... igfsCfg) {
+    public void setFileSystemConfiguration(FileSystemConfiguration... igfsCfg) {
         this.igfsCfg = igfsCfg;
     }
 
@@ -1768,7 +1767,7 @@ public class IgniteConfiguration {
      *
      * @return Hadoop configuration.
      */
-    public GridHadoopConfiguration getHadoopConfiguration() {
+    public HadoopConfiguration getHadoopConfiguration() {
         return hadoopCfg;
     }
 
@@ -1777,7 +1776,7 @@ public class IgniteConfiguration {
      *
      * @param hadoopCfg Hadoop configuration.
      */
-    public void setHadoopConfiguration(GridHadoopConfiguration hadoopCfg) {
+    public void setHadoopConfiguration(HadoopConfiguration hadoopCfg) {
         this.hadoopCfg = hadoopCfg;
     }
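
Taken together, the renamed accessors change application-side wiring roughly as follows (a sketch; the no-arg HadoopConfiguration constructor is assumed):

    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.configuration.HadoopConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;

    IgniteConfiguration cfg = new IgniteConfiguration();

    // Before this commit: cfg.setIgfsConfiguration(new IgfsConfiguration());
    cfg.setFileSystemConfiguration(new FileSystemConfiguration());

    // Before this commit: cfg.setHadoopConfiguration(new GridHadoopConfiguration());
    cfg.setHadoopConfiguration(new HadoopConfiguration());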
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/igfs/Igfs.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/Igfs.java b/modules/core/src/main/java/org/apache/ignite/igfs/Igfs.java
deleted file mode 100644
index 48b9b58..0000000
--- a/modules/core/src/main/java/org/apache/ignite/igfs/Igfs.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.ignite.*;
-import org.jetbrains.annotations.*;
-
-import java.io.*;
-import java.util.*;
-
-/**
- * Common file system interface. It provides a typical generalized "view" of any file system:
- * <ul>
- *     <li>list directories or get information for a single path</li>
- *     <li>create/move/delete files or directories</li>
- *     <li>write/read data streams into/from files</li>
- * </ul>
- *
- * This is the minimum functionality needed to work as a secondary file system in IGFS dual modes.
- */
-public interface Igfs {
-    /** File property: user name. */
-    public static final String PROP_USER_NAME = "usrName";
-
-    /** File property: group name. */
-    public static final String PROP_GROUP_NAME = "grpName";
-
-    /** File property: permission. */
-    public static final String PROP_PERMISSION = "permission";
-
-    /**
-     * Checks if the specified path exists in the file system.
-     *
-     * @param path Path to check for existence in the file system.
-     * @return {@code True} if such file exists, otherwise {@code false}.
-     * @throws IgniteException In case of error.
-     */
-    public boolean exists(IgfsPath path);
-
-    /**
-     * Updates file information for the specified path. Existing properties not listed in the passed collection
-     * will not be affected; listed properties will be added or overwritten. Passed properties with {@code null} values
-     * will be removed from the stored properties, or ignored if they don't exist in the file info.
-     * <p>
-     * When working in {@code DUAL_SYNC} or {@code DUAL_ASYNC} modes only the following properties will be propagated
-     * to the secondary file system:
-     * <ul>
-     * <li>{@code usrName} - file owner name;</li>
-     * <li>{@code grpName} - file owner group;</li>
-     * <li>{@code permission} - Unix-style string representing file permissions.</li>
-     * </ul>
-     *
-     * @param path File path to set properties for.
-     * @param props Properties to update.
-     * @return File information for specified path or {@code null} if such path does not exist.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteException;
-
-    /**
-     * Renames/moves a file.
-     * <p>
-     * You are free to rename/move data files as you wish, but directories can only be renamed;
-     * they cannot be moved between different parent directories.
-     * <p>
-     * Examples:
-     * <ul>
-     *     <li>"/work/file.txt" => "/home/project/Presentation Scenario.txt"</li>
-     *     <li>"/work" => "/work-2012.bkp"</li>
-     *     <li>"/work" => "<strike>/backups/work</strike>" - such operation is restricted for directories.</li>
-     * </ul>
-     *
-     * @param src Source file path to rename.
-     * @param dest Destination file path. If destination path is a directory, then source file will be placed
-     *     into destination directory with original name.
-     * @throws IgniteException In case of error.
-     * @throws IgfsFileNotFoundException If source file doesn't exist.
-     */
-    public void rename(IgfsPath src, IgfsPath dest) throws IgniteException;
-
-    /**
-     * Deletes file.
-     *
-     * @param path File path to delete.
-     * @param recursive Delete non-empty directories recursively.
-     * @return {@code True} in case of success, {@code false} otherwise.
-     * @throws IgniteException In case of error.
-     */
-    public boolean delete(IgfsPath path, boolean recursive) throws IgniteException;
-
-    /**
-     * Creates directories under specified path.
-     *
-     * @param path Path of directories chain to create.
-     * @throws IgniteException In case of error.
-     */
-    public void mkdirs(IgfsPath path) throws IgniteException;
-
-    /**
-     * Creates directories under specified path with the specified properties.
-     *
-     * @param path Path of directories chain to create.
-     * @param props Metadata properties to set on created directories.
-     * @throws IgniteException In case of error.
-     */
-    public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) throws IgniteException;
-
-    /**
-     * Lists file paths under the specified path.
-     *
-     * @param path Path to list files under.
-     * @return List of paths under the specified path.
-     * @throws IgniteException In case of error.
-     * @throws IgfsFileNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteException;
-
-    /**
-     * Lists files under the specified path.
-     *
-     * @param path Path to list files under.
-     * @return List of files under the specified path.
-     * @throws IgniteException In case of error.
-     * @throws IgfsFileNotFoundException If path doesn't exist.
-     */
-    public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteException;
-
-    /**
-     * Opens a file for reading.
-     *
-     * @param path File path to read.
-     * @param bufSize Read buffer size (bytes) or {@code zero} to use default value.
-     * @return File input stream to read data from.
-     * @throws IgniteException In case of error.
-     * @throws IgfsFileNotFoundException If path doesn't exist.
-     */
-    public IgfsReader open(IgfsPath path, int bufSize) throws IgniteException;
-
-    /**
-     * Creates a file and opens it for writing.
-     *
-     * @param path File path to create.
-     * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existing directory.
-     * @return File output stream to write data to.
-     * @throws IgniteException In case of error.
-     */
-    public OutputStream create(IgfsPath path, boolean overwrite) throws IgniteException;
-
-    /**
-     * Creates a file and opens it for writing.
-     *
-     * @param path File path to create.
-     * @param bufSize Write buffer size (bytes) or {@code zero} to use default value.
-     * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existing directory.
-     * @param replication Replication factor.
-     * @param blockSize Block size.
-     * @param props File properties to set.
-     * @return File output stream to write data to.
-     * @throws IgniteException In case of error.
-     */
-    public OutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication, long blockSize,
-       @Nullable Map<String, String> props) throws IgniteException;
-
-    /**
-     * Opens an output stream to an existing file for appending data.
-     *
-     * @param path File path to append.
-     * @param bufSize Write buffer size (bytes) or {@code zero} to use default value.
-     * @param create Create file if it doesn't exist yet.
-     * @param props File properties to set, applied only if the file was just created.
-     * @return File output stream to append data to.
-     * @throws IgniteException In case of error.
-     * @throws IgfsFileNotFoundException If path doesn't exist and create flag is {@code false}.
-     */
-    public OutputStream append(IgfsPath path, int bufSize, boolean create, @Nullable Map<String, String> props)
-        throws IgniteException;
-
-    /**
-     * Gets file information for the specified path.
-     *
-     * @param path Path to get information for.
-     * @return File information for specified path or {@code null} if such path does not exist.
-     * @throws IgniteException In case of error.
-     */
-    public IgfsFile info(IgfsPath path) throws IgniteException;
-
-    /**
-     * Gets used space in bytes.
-     *
-     * @return Used space in bytes.
-     * @throws IgniteException In case of error.
-     */
-    public long usedSpaceSize() throws IgniteException;
-
-    /**
-     * Gets the implementation-specific properties of the file system.
-     *
-     * @return Map of properties.
-     */
-    public Map<String,String> properties();
-}
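
For reference, the removed contract composed like this (a hedged sketch against the deleted interface; paths and payload are hypothetical, and imports match those of the deleted file):

    static void roundTrip(Igfs fs) throws IOException {
        IgfsPath dir = new IgfsPath("/work");
        IgfsPath file = new IgfsPath("/work/file.txt");

        // Ensure the directory chain exists before writing.
        if (!fs.exists(dir))
            fs.mkdirs(dir);

        // Create the file, overwriting any previous version, and write a payload.
        try (OutputStream out = fs.create(file, true)) {
            out.write("hello".getBytes());
        }

        System.out.println("Used space: " + fs.usedSpaceSize() + " bytes");

        fs.delete(file, false);
    }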

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
index c4f28c6..afd0314 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
@@ -22,7 +22,7 @@ import java.util.*;
 /**
  * {@code IGFS} file's data block location in the grid. It is used to determine
  * node affinity of a certain file block within the Grid by calling
- * {@link org.apache.ignite.IgniteFs#affinity(IgfsPath, long, long)} method.
+ * {@link org.apache.ignite.IgniteFileSystem#affinity(IgfsPath, long, long)} method.
  */
 public interface IgfsBlockLocation {
     /**

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
index 172dca1..550679a 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
@@ -77,7 +77,7 @@ public interface IgfsFile {
     /**
      * Gets file last access time. File last access time is not updated automatically due to
      * performance considerations and can be updated on demand with
-     * {@link org.apache.ignite.IgniteFs#setTimes(IgfsPath, long, long)} method.
+     * {@link org.apache.ignite.IgniteFileSystem#setTimes(IgfsPath, long, long)} method.
      * <p>
      * By default last access time equals file creation time.
      *

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
index 308dbcb..c2ddbb0 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
@@ -17,13 +17,15 @@
 
 package org.apache.ignite.igfs;
 
+import org.apache.ignite.igfs.secondary.*;
+
 import java.io.*;
 
 /**
  * {@code IGFS} input stream to read data from the file system.
  * It provides several additional methods for asynchronous access.
  */
-public abstract class IgfsInputStream extends InputStream implements IgfsReader {
+public abstract class IgfsInputStream extends InputStream {
     /**
      * Gets file length during file open.
      *
@@ -76,5 +78,5 @@ public abstract class IgfsInputStream extends InputStream implements IgfsReader
      * @return Total number of bytes read into the buffer, or -1 if there is no more data (EOF).
      * @throws IOException In case of IO exception.
      */
-    @Override public abstract int read(long pos, byte[] buf, int off, int len) throws IOException;
+    public abstract int read(long pos, byte[] buf, int off, int len) throws IOException;
 }
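
The positioned read kept on the stream can then be used like this (a sketch; 'in' is assumed to be an already opened IgfsInputStream):

    // Read up to 64 KB from absolute offset 1 MB without moving the stream position.
    byte[] buf = new byte[64 * 1024];
    int n = in.read(1024L * 1024, buf, 0, buf.length); // -1 on EOF, per the javadoc.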

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
index afdae1a..50b5435 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
@@ -33,7 +33,7 @@ public interface IgfsMetrics {
 
     /**
      * Gets maximum amount of data that can be stored on the local node. This metric is either
-     * equal to {@link org.apache.ignite.configuration.IgfsConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
+     * equal to {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
      * {@code 80%} of maximum heap size allocated for JVM.
      *
      * @return Maximum IGFS local space size.

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/498dcfab/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
index 3c440ab..2c9fcdd 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
@@ -24,7 +24,7 @@ import org.jetbrains.annotations.*;
  * Secondary Hadoop file system is provided for pass-through, write-through, and
  * read-through purposes.
  * <p>
- * This mode is configured via {@link org.apache.ignite.configuration.IgfsConfiguration#getDefaultMode()}
+ * This mode is configured via {@link org.apache.ignite.configuration.FileSystemConfiguration#getDefaultMode()}
  * configuration property.
  */
 public enum IgfsMode {
@@ -39,7 +39,7 @@ public enum IgfsMode {
      * through to secondary Hadoop file system. If this mode is enabled, then
      * secondary Hadoop file system must be configured.
      *
-     * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+     * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
      */
     PROXY,
 
@@ -50,7 +50,7 @@ public enum IgfsMode {
      * If secondary Hadoop file system is not configured, then this mode behaves like
      * {@link #PRIMARY} mode.
      *
-     * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+     * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
      */
     DUAL_SYNC,
 
@@ -61,7 +61,7 @@ public enum IgfsMode {
      * If secondary Hadoop file system is not configured, then this mode behaves like
      * {@link #PRIMARY} mode.
      *
-     * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+     * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
      */
     DUAL_ASYNC;